diff --git a/spaces/0xqtpie/doodle2vid/app.py b/spaces/0xqtpie/doodle2vid/app.py deleted file mode 100644 index bddc8507f3aad1990cc5cfe58f34f274e32fa64a..0000000000000000000000000000000000000000 --- a/spaces/0xqtpie/doodle2vid/app.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env python - -import os -import random - -import gradio as gr -import numpy as np -import PIL.Image -import torch -import torchvision.transforms.functional as TF -from diffusers import ( - AutoencoderKL, - EulerAncestralDiscreteScheduler, - StableDiffusionXLAdapterPipeline, - T2IAdapter, -) - -from modelscope.pipelines import pipeline -from modelscope.outputs import OutputKeys - -DESCRIPTION = '''# doodle2vid -Combining T2I-Adapter-SDXL with MS-Image2Video to create a doodle to video pipeline. -Shout-out to [fffiloni](https://huggingface.co/fffiloni) & [ARC Lab, Tencent PCG](https://huggingface.co/TencentARC) 🗣️ - -How to use: Draw a doodle in the canvas, and click "Run" to generate a video. -You can also provide a prompt with more details and choose a style. -''' - -if not torch.cuda.is_available(): - DESCRIPTION += "\n

Running on CPU 🥶 This demo does not work on CPU.

" - -style_list = [ - { - "name": "(No style)", - "prompt": "{prompt}", - "negative_prompt": "", - }, - { - "name": "Cinematic", - "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy", - "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured", - }, - { - "name": "3D Model", - "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting", - "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting", - }, - { - "name": "Anime", - "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed", - "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast", - }, - { - "name": "Digital Art", - "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed", - "negative_prompt": "photo, photorealistic, realism, ugly", - }, - { - "name": "Photographic", - "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed", - "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly", - }, - { - "name": "Pixel art", - "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics", - "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic", - }, - { - "name": "Fantasy art", - "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy", - "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white", - }, - { - "name": "Neonpunk", - "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional", - "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured", - }, - { - "name": "Manga", - "prompt": "manga style {prompt} . 
vibrant, high-energy, detailed, iconic, Japanese comic style", - "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style", - }, -] - -styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list} -STYLE_NAMES = list(styles.keys()) -DEFAULT_STYLE_NAME = "(No style)" - - -def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]: - p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME]) - return p.replace("{prompt}", positive), n + negative - - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -if torch.cuda.is_available(): - model_id = "stabilityai/stable-diffusion-xl-base-1.0" - adapter = T2IAdapter.from_pretrained( - "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" - ) - scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler") - pipe = StableDiffusionXLAdapterPipeline.from_pretrained( - model_id, - vae=AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16), - adapter=adapter, - scheduler=scheduler, - torch_dtype=torch.float16, - variant="fp16", - ) - pipe.to(device) -else: - pipe = None - -MAX_SEED = np.iinfo(np.int32).max -video_pipe = pipeline(task='image-to-video', model='damo/Image-to-Video', model_revision='v1.1.0') - - -def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: - if randomize_seed: - seed = random.randint(0, MAX_SEED) - return seed - -def inferVideo(image: PIL.Image.Image) -> str: - # Save the passed image to a temp file - temp_path = "temp_input_image.png" - image.save(temp_path) - - output_video_path = video_pipe(temp_path, output_video='output.mp4')[OutputKeys.OUTPUT_VIDEO] - print(output_video_path) - return output_video_path - -def inferImage( - image: PIL.Image.Image, - prompt: str, - negative_prompt: str, - style_name: str = DEFAULT_STYLE_NAME, - num_steps: int = 25, - guidance_scale: float = 5, - adapter_conditioning_scale: float = 0.8, - adapter_conditioning_factor: float = 0.8, - seed: int = 0, - progress=gr.Progress(track_tqdm=True), -) -> PIL.Image.Image: - image = image.convert("RGB") - image = TF.to_tensor(image) > 0.5 - image = TF.to_pil_image(image.to(torch.float32)) - - prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt) - - generator = torch.Generator(device=device).manual_seed(seed) - out = pipe( - prompt=prompt, - negative_prompt=negative_prompt, - image=image, - num_inference_steps=num_steps, - generator=generator, - guidance_scale=guidance_scale, - adapter_conditioning_scale=adapter_conditioning_scale, - adapter_conditioning_factor=adapter_conditioning_factor, - ).images[0] - - return out - - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION, elem_id="description") - - with gr.Row(): - with gr.Column(): - with gr.Group(): - image = gr.Image( - source="canvas", - tool="sketch", - type="pil", - image_mode="L", - invert_colors=True, - shape=(1024, 1024), - brush_radius=4, - height=440, - ) - prompt = gr.Textbox(label="Prompt") - style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME) - run_button = gr.Button("Run") - with gr.Accordion("Advanced options", open=False): - negative_prompt = gr.Textbox( - label="Negative prompt", - value=" extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured", - ) - num_steps = gr.Slider( - label="Number of steps", - minimum=1, - maximum=50, - step=1, - value=25, - ) - 
guidance_scale = gr.Slider( - label="Guidance scale", - minimum=0.1, - maximum=10.0, - step=0.1, - value=5, - ) - adapter_conditioning_scale = gr.Slider( - label="Adapter conditioning scale", - minimum=0.5, - maximum=1, - step=0.1, - value=0.8, - ) - adapter_conditioning_factor = gr.Slider( - label="Adapter conditioning factor", - info="Fraction of timesteps for which adapter should be applied", - minimum=0.5, - maximum=1, - step=0.1, - value=0.8, - ) - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=MAX_SEED, - step=1, - value=0, - ) - randomize_seed = gr.Checkbox(label="Randomize seed", value=True) - with gr.Column(): - result_image = gr.Image(label="Intermediate Image Output", type="pil", interactive=False, height=400) - result_video = gr.Video(label="Final Video Output", height=400) - - inputs = [ - image, - prompt, - negative_prompt, - style, - num_steps, - guidance_scale, - adapter_conditioning_scale, - adapter_conditioning_factor, - seed, - ] - prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - api_name=False, - ).then( - fn=inferImage, - inputs=inputs, - outputs=result_image, - api_name=False, - ).then( - fn=inferVideo, - inputs=[result_image], - outputs=result_video, - api_name=False, - ) - negative_prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - api_name=False, - ).then( - fn=inferImage, - inputs=inputs, - outputs=result_image, - api_name=False, - ).then( - fn=inferVideo, - inputs=[result_image], - outputs=result_video, - api_name=False, - ) - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - api_name=False, - ).then( - fn=inferImage, - inputs=inputs, - outputs=result_image, - api_name=False, - ).then( - fn=inferVideo, - inputs=[result_image], - outputs=result_video, - api_name=False, - ) - -if __name__ == "__main__": - demo.queue(max_size=20).launch() diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/256 Igara Na Jednom Cd-u Free !!BETTER!! 16.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/256 Igara Na Jednom Cd-u Free !!BETTER!! 16.md deleted file mode 100644 index 579c90287148b45d38035260142734172d69de29..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/256 Igara Na Jednom Cd-u Free !!BETTER!! 16.md +++ /dev/null @@ -1,54 +0,0 @@ - -

256 igara na jednom cd-u free 16: A nostalgic trip to the Sega era


If you are a PC gamer who grew up in Serbia or any other country in Eastern Europe in the early 2000s, chances are you have heard of or played a CD called "256 igara na jednom cd-u free 16". This CD was a collection of Sega games that ran on an emulator and offered hours of fun for anyone who had a low-end machine or just wanted to enjoy some classic titles. In this article, we will explore what this CD was, what games it contained, how to play them on PC today, and why they are still fun to play.

-

256 igara na jednom cd-u free 16


DOWNLOAD >>>>> https://byltly.com/2uKvJ1



-

Introduction

-

Sega is one of the most famous video game companies in history. Its roots go back to 1940, when its American predecessor Standard Games was founded, and the Japanese company itself was established in 1960. Sega started as a manufacturer of coin-operated amusement machines such as slot machines and jukeboxes. In 1983, Sega entered the home console market with its first system, the SG-1000. However, it was not until the late 1980s that Sega achieved worldwide success with its 16-bit console, the Sega Genesis (also known as the Mega Drive).

-

The Sega Genesis was a revolutionary console that competed with Nintendo's Super Nintendo Entertainment System (SNES) in the so-called "console wars" of the late 80s and early 90s. The Genesis had a library of over 900 games, many of which are considered classics today. Some of these games were ported to PC by using emulators, which are software programs that mimic the hardware and software of another system.

-

One such emulator was Gens, which was released in 1999 and became one of the most popular Sega emulators for PC. Gens allowed users to play Sega Genesis, Sega CD, and Sega 32X games on their computers by loading ROM files, which are digital copies of game cartridges or discs.

-

One of the most famous ROM collections was "256 igara na jednom cd-u free 16", which literally means "256 games on one CD for free 16" in Serbian. This CD was a compilation of Sega games that ran on Gens and was widely distributed in Serbia and other countries in Eastern Europe in the early 2000s. Many PC gamers who had low-end machines or limited access to the internet or other sources of entertainment enjoyed playing these games for hours.

-

What is 256 igara na jednom cd-u free 16?

-

The meaning of the phrase

-

The phrase "256 igara na jednom cd-u free 16" is composed of four parts:
- "256": the number of games included on the disc
- "igara": the genitive plural of the Serbian word "igra", meaning "games"
- "na jednom cd-u": Serbian for "on one CD"
- "free 16": the English word "free" followed by the number 16

- -

Therefore, the phrase "256 igara na jednom cd-u free 16" can be translated into English as "256 games on one CD for free 16".

-

The origin of the CD

-

The exact origin of this CD is unknown, but it is likely that it was created by some anonymous PC enthusiast who wanted to share his or her collection of Sega ROMs with other gamers. It is possible that this person downloaded these ROMs from various websites or obtained them from other sources such as magazines or friends.

-

256 games on one cd free 16
-Download 256 igara na jednom cd-u besplatno 16
-How to play 256 igara na jednom cd-u for free 16
-256 igara na jednom cd-u free 16 soundcloud
-Best 256 igara na jednom cd-u games free 16
-256 igara na jednom cd-u free 16 review
-Where to buy 256 igara na jednom cd-u free 16
-256 igara na jednom cd-u free 16 online
-256 igara na jednom cd-u free 16 cheats
-256 igara na jednom cd-u free 16 tips and tricks
-256 igara na jednom cd-u free 16 gameplay
-256 igara na jednom cd-u free 16 walkthrough
-256 igara na jednom cd-u free 16 emulator
-256 igara na jednom cd-u free 16 iso
-256 igara na jednom cd-u free 16 rar
-256 igara na jednom cd-u free 16 torrent
-256 igara na jednom cd-u free 16 crack
-256 igara na jednom cd-u free 16 patch
-256 igara na jednom cd-u free 16 serial key
-256 igara na jednom cd-u free 16 full version
-What is 256 igara na jednom cd-u free 16
-Who made 256 igara na jednom cd-u free 16
-When was 256 igara na jednom cd-u free 16 released
-Why is 256 igara na jednom cd-u free 16 popular
-How many games are in 256 igara na jednom cd-u free 16
-What are the genres of games in 256 igara na jednom cd-u free 16
-What are the system requirements for playing 256 igara na jednom cd-u free 16
-How to install and run 256 igara na jednom cd-u free 16
-How to uninstall and remove 256 igara na jednom cd-u free 16
-How to backup and restore your progress in 256 igara na jednom cd-u free

-

The CD was then burned using a software program such as Nero Burning ROM or Easy CD Creator and labeled with a printed cover that featured some images from Sega games such as Sonic the Hedgehog, Streets of Rage, Mortal Kombat, etc. The cover also had some text written in Serbian such as "Najbolje igre za vas kompjuter!" ("The best games for your computer!") or "Samo za prave ljubitelje igrica!" ("Only for true game lovers!").

-

The CD was then distributed among PC users either by mail order, personal exchange, or through local shops or markets. Many people who received this CD were unaware of its origin or content but were curious enough to try it out. Some were pleasantly surprised by finding some familiar titles from their childhood while others were introduced to new genres or franchises they had never played before.

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Secure and Fast Download.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Secure and Fast Download.md
deleted file mode 100644
index 1dc876886c72232c6eec30ae7bcf2bf426d0ecdf..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Secure and Fast Download.md
+++ /dev/null
@@ -1,99 +0,0 @@
-

Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Free Download

-

If you are looking for a powerful and versatile PDF editor, you might want to check out Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]. This is a cracked version of the original software that allows you to use all the features without paying for a subscription. In this article, we will show you how to download, install, and use this software for free.

-

What is Adobe Acrobat Pro DC?

-

Adobe Acrobat Pro DC is a software application that lets you create, edit, sign, and share PDF documents with ease. It is part of the Adobe Document Cloud suite, which means you can access your files from anywhere and collaborate with others online. Some of the features and benefits of Adobe Acrobat Pro DC are:
- Creating and editing PDF documents, including their text, images, links, and forms
- Signing PDFs electronically and sharing them via email or cloud services
- Organizing, protecting, optimizing, and converting PDF files

-

Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Free Download


Download Zip ✫✫✫ https://byltly.com/2uKvo2



- -

What is a crack and why do you need it?

-

A crack is a file or a program that modifies the original software to bypass its security features and activate it without a license key or a subscription. It is usually created by hackers or crackers who want to use the software for free or distribute it illegally.

-

You might need a crack if you want to use Adobe Acrobat Pro DC without paying for it or if you have lost your license key or subscription. However, you should be aware that using a crack is not legal or ethical. It violates the terms and conditions of the software developer and may expose your computer to viruses or malware. Therefore, we do not recommend or endorse using a crack for any purpose.

-

How to download and install Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?

-

If you still want to download and install Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH], here are the steps you need to follow:

-

How to get Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] for free
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] torrent download link
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] activation key generator
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] full version with patch
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] serial number and license code
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] direct download no survey
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] latest update free download
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] working crack tested
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] features and benefits
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] system requirements and compatibility
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] installation guide and instructions
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] review and feedback
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] alternative and similar software
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] customer support and contact
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] official website and download source
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] malware and virus scan report
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] comparison with other versions of Adobe Acrobat
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] tips and tricks to use it better
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] pros and cons analysis
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] best price and discount offer
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] backup and restore options
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to uninstall and remove it completely
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to upgrade and update it easily
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to solve common errors and issues
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to customize and optimize it for your needs
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to create and edit PDF files with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to convert PDF files to other formats with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to sign and secure PDF files with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to share and collaborate on PDF files with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to fill and submit forms with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to combine and organize PDF files with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to add comments and annotations to PDF files with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to use the cloud services and mobile apps with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to use the accessibility features and tools with it
-Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to use the advanced editing and printing options with it
-Adobe Acrobat Pro DC 2018.009

-

Download the software

-

You can download the software from this link. It is a compressed file that contains the setup file and the crack file. You will need a program like WinRAR or 7-Zip to extract it.

-

Install the software

-

After extracting the file, run the setup file as administrator and follow the instructions on the screen. Choose the language and destination folder for the installation. Do not launch the software after the installation is complete.

-

Apply the crack

-

Go to the folder where you extracted the file and copy the crack file (Adobe Universal Patcher.exe). Then go to the folder where you installed the software (usually C:\Program Files (x86)\Adobe\Acrobat DC) and paste the crack file there. Run the crack file as administrator and click on Patch. Wait for a few seconds until you see a message saying "Patching Done". You can now launch the software and enjoy all its features for free.

-

How to use Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?

-

Once you have installed and activated Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH], you can start using it for your PDF needs. Here are some tips and tricks on how to use it:

-

Create and edit PDFs

-

To create a new PDF document from scratch or from another format, go to File > Create > Blank Document or File > Create > PDF from File/File from Scanner/Web Page/Clipboard/Screen Capture/Portfolio/Combine Files into PDF etc.

-

To edit an existing PDF document, open it in Adobe Acrobat Pro DC and use the tools on the right panel or on the top toolbar. You can edit text by clicking on Edit PDF > Edit Text & Images or by double-clicking on any text element. You can edit images by clicking on Edit PDF > Edit Text & Images or by right-clicking on any image element. You can edit links by clicking on Edit PDF > Link > Add/Edit Web or Document Link or by right-clicking on any link element. You can edit forms by clicking on Prepare Form > Add/Edit/Delete Fields or by right-clicking on any form element.

-

You can also use other tools such as Comment > Add Sticky Note/Text Box/Callout/Typewriter/Stamp/File Attachment/Audio/Video etc., Annotate > Highlight/Underline/Cross Out/Strikethrough etc., Sign & Certify > Place Signature/Certify with Visible Signature/Certify without Visible Signature etc., Redact > Mark for Redaction/Apply Redactions etc., Protect > Encrypt with Password/Encrypt with Certificate/Remove Hidden Information/Sanitize Document etc., Optimize PDF > Reduce File Size/Optimize Scanned PDF/Optimize for Web Publishing etc., Organize Pages > Insert/Delete/Rotate/Crop/Extract/Split/Merge/Replace/Renumber Pages etc., Enhance Scans > Recognize Text/Optimize Scanned Pages/Edit Text/Edit Images/ClearScan etc., Print Production > Preflight/Fix Hairlines/Add Printer Marks/Convert Colors/Flatten Transparency etc., Accessibility > Full Check/Add Tags/Add Alternate Text/Set Reading Order/Set Tab Order/Set Language/Set Title/Set Open Options etc., Action Wizard > Create New Action/Edit Actions/Delete Actions/Run Actions etc., Compare Files > Select Files To Compare/Compare Files Side By Side/Compare Files In A Single Document/View Report Of Differences etc., Rich Media > Add/Edit/Delete Flash/Add/Edit/Delete Sound/Add/Edit/Delete Video/Add/Edit/Delete 3D/Add/Edit/Delete Buttons/Add/Edit/Delete Forms/Add/Edit/Delete Links/Add/Edit/Delete Bookmarks/Add/Edit/Delete Layers/Add/Edit/Delete Comments/Add/Edit/Delete Tags/Add/Edit/Delete Metadata etc.

-

Sign and share PDFs

-

To sign a PDF document electronically or digitally, go to Tools > Sign & Certify > Place Signature/Certify with Visible Signature/Certify without Visible Signature etc., choose your signature method (type/draw/image/certificate), position your signature on the document, customize your signature appearance if needed (name/reason/location/date/text/graphic), click Apply or Sign Document.

-

To share a PDF document via email or cloud services such as Google Drive, Dropbox, Microsoft OneDrive, or Box, use Acrobat's built-in Share options. We hope this article has helped you understand what Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] is and how to use it.

-

FAQs

-

Here are some frequently asked questions and answers about Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]:

-
    -
  1. Q: Is Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] safe to use?
    -A: No, it is not safe to use. It may contain viruses or malware that can harm your computer or steal your data. It may also cause errors or crashes that can damage your files or system.
  2. -
  3. Q: Is Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] legal to use?
    -A: No, it is not legal to use. It violates the copyright and license agreement of Adobe and may result in legal consequences such as fines or lawsuits.
  4. -
  5. Q: How can I get Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] for free?
    -A: You can't get Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] for free legally or ethically. The only way to get it for free is to download it from an illegal or untrusted source, which is not recommended.
  6. -
  7. Q: How can I update Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?
    -A: You can't update Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]. The crack disables the update feature of the software and prevents you from getting the latest features and security patches from Adobe.
  8. -
  9. Q: How can I uninstall Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?
-A: You can uninstall Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] by following these steps:
- Close Adobe Acrobat Pro DC if it is running.
- Open Control Panel > Programs and Features (or Settings > Apps on Windows 10).
- Select Adobe Acrobat Pro DC in the list, click Uninstall, and follow the prompts.
- Delete any leftover files in the installation folder (usually C:\Program Files (x86)\Adobe\Acrobat DC).

    - -

    -
    -
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DeltaHorizon download low pc How to Enjoy This Sci-Fi Action-Adventure Game on Any Device.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DeltaHorizon download low pc How to Enjoy This Sci-Fi Action-Adventure Game on Any Device.md
deleted file mode 100644
index aeea45a91a86fb67b0c02b89f5872e6d9448d790..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DeltaHorizon download low pc How to Enjoy This Sci-Fi Action-Adventure Game on Any Device.md
+++ /dev/null
@@ -1,226 +0,0 @@
-
    -

    DeltaHorizon Download Low PC: How to Play This Amazing Online Multiplayer Game on a Budget

    -

    Do you love online multiplayer games that offer stunning graphics, immersive gameplay, and diverse modes? If so, you might have heard of DeltaHorizon, one of the most popular games in this genre. But what if you don't have a high-end PC that can run this game smoothly? Does that mean you have to miss out on this amazing game? Not at all! In this article, I will show you how you can download DeltaHorizon for low PC and enjoy it without breaking the bank or compromising your experience. So, let's get started!

    -

    What is DeltaHorizon?

    -

    DeltaHorizon is an online multiplayer game that was released in 2022 by SoundCloud, a platform for music streaming and sharing. The game is set in a post-apocalyptic world where players can choose from four different factions: Survivors, Bandits, Horsemen, and Machetes. Each faction has its own strengths, weaknesses, and objectives. The game features various modes, such as survival, questing, role-playing, crafting, and combat. Players can explore the vast open world of Eastern Europe, interact with other players and NPCs, collect resources and items, build shelters and bases, fight enemies and zombies, and more.

    -

    DeltaHorizondownloadlowpc


    Download File 🗹 https://byltly.com/2uKyza



    -

    The game has received rave reviews from critics and players alike for its stunning graphics, realistic physics, dynamic weather, day-night cycle, and rich sound effects. The game also boasts a large and active community of players who create and share content, such as mods, maps, skins, music, and more. DeltaHorizon is available for Windows, PlayStation 4, Xbox One, and Nintendo Switch platforms.

    -

    The system requirements for playing DeltaHorizon on PC

    -

    As you can imagine, DeltaHorizon is a demanding game that requires a powerful PC to run smoothly. According to the official website of the game, these are the minimum and recommended system requirements for playing DeltaHorizon on PC:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    MinimumRecommended
    CPU: Intel Core i5-2500K or AMD FX-6300CPU: Intel Core i7-4770K or AMD Ryzen 5 1600
    RAM: 8 GBRAM: 16 GB
    GPU: NVIDIA GeForce GTX 760 or AMD Radeon R9 280XGPU: NVIDIA GeForce GTX 1060 or AMD Radeon RX 580
    OS: Windows 10 64-bitOS: Windows 10 64-bit
    Storage: 60 GB available spaceStorage: 60 GB available space
    Internet: Broadband connectionInternet: Broadband connection
    -

    If your PC meets or exceeds these requirements, you should be able to play DeltaHorizon without any issues. However, if your PC falls short of these requirements, don't worry. There are still ways to download DeltaHorizon for low PC and play it with decent performance and quality. Let me show you how.

    -

    How to download DeltaHorizon for low PC

    -

    The best sources to get the game for free or cheap

    -

    The first step to download DeltaHorizon for low PC is to find a reliable source that offers the game for free or at a low price. There are many websites that claim to provide free or cracked versions of DeltaHorizon, but be careful. Many of them are scams that can infect your PC with malware or viruses. Some of them may also offer outdated or incomplete versions of the game that may not work properly or cause errors.

    -

    The best way to get DeltaHorizon for free or cheap is to use legitimate sources that offer discounts or giveaways. For example, you can check out these websites that often have deals on DeltaHorizon:

    -

    How to download Delta Horizon on low-end PC
    -Delta Horizon PC download size and requirements
    -Delta Horizon low graphics settings for PC
    -Best sites to download Delta Horizon for PC
    -Delta Horizon PC game review and rating
    -Delta Horizon PC gameplay and features
    -Delta Horizon PC download link and installation guide
    -Delta Horizon PC cheats and hacks
    -Delta Horizon PC mods and customizations
    -Delta Horizon PC multiplayer and online mode
    -Delta Horizon PC free download full version
    -Delta Horizon PC patch notes and updates
    -Delta Horizon PC performance and optimization tips
    -Delta Horizon PC controller support and keybindings
    -Delta Horizon PC save file location and backup
    -Delta Horizon PC error fixes and troubleshooting
    -Delta Horizon PC comparison with console versions
    -Delta Horizon PC best weapons and loadouts
    -Delta Horizon PC best missions and quests
    -Delta Horizon PC best characters and skills
    -Delta Horizon PC secrets and easter eggs
    -Delta Horizon PC DLCs and expansions
    -Delta Horizon PC screenshots and wallpapers
    -Delta Horizon PC fan art and memes
    -Delta Horizon PC soundtrack and music
    -Delta Horizon PC system requirements test
    -Delta Horizon PC demo and trial version
    -Delta Horizon PC refund policy and customer support
    -Delta Horizon PC price and discounts
    -Delta Horizon PC steam key and activation code
    -Delta Horizon low spec gamer settings for PC
    -Delta Horizon ultra graphics mod for PC
    -Delta Horizon realistic physics mod for PC
    -Delta Horizon zombie mode mod for PC
    -Delta Horizon battle royale mode mod for PC
    -Delta Horizon VR mode mod for PC
    -Delta Horizon co-op mode mod for PC
    -Delta Horizon crossplay mode mod for PC
    -Delta Horizon nude mod for PC (NSFW)
    -Delta Horizon anime mod for PC (NSFW)
    -How to run Delta Horizon on Windows 10/11 for PC
    -How to run Delta Horizon on Linux for PC
    -How to run Delta Horizon on Mac for PC
    -How to run Delta Horizon on Android for PC (emulator)
    -How to run Delta Horizon on iOS for PC (emulator)
    -How to stream Delta Horizon from PC to TV/phone/tablet
    -How to record Delta Horizon gameplay on PC (software)
    -How to edit Delta Horizon videos on PC (software)
    -How to upload Delta Horizon videos on YouTube/Twitch from PC (software)
    -How to make money from playing Delta Horizon on PC (tips)

    - -

    Once you find a source that offers DeltaHorizon for free or cheap, make sure you download it from a secure link and scan it with an antivirus program before installing it.

    -

    The steps to install and run the game on a low-end PC

    -

    The next step to download DeltaHorizon for low PC is to install and run the game on your low-end PC. Here are the steps to do so:

    -
      -
    1. Create a folder on your hard drive where you want to install the game.
    2. -
    3. Extract the downloaded file using a program like WinRAR or 7-Zip.
    4. -
    5. Run the setup.exe file and follow the instructions.
    6. -
    7. Select the folder where you want to install the game.
    8. -
    9. Wait for the installation process to finish.
    10. -
    11. Launch the game from the desktop shortcut or the start menu.
    12. -
    13. Create an account or log in with your existing one.
    14. -
    15. Select your preferred language and region.
    16. -
    17. Enjoy playing DeltaHorizon!
    18. -
    -

    Adjusting the graphics settings

    - the graphics options that you can lower or disable to download DeltaHorizon for low PC:

    - -

    You can adjust these graphics settings from the options menu in the game. You can also use a program like DeltaHorizon Optimizer that automatically optimizes your graphics settings for your PC.

    -

    Using optimization tools and mods

    -

    Another step to download DeltaHorizon for low PC is to use optimization tools and mods that can enhance your performance and experience. Optimization tools are programs that can tweak your system settings, clean your registry, defragment your disk, and more. Mods are modifications that can change or add features to the game, such as new maps, skins, weapons, modes, and more. Here are some of the optimization tools and mods that you can use to download DeltaHorizon for low PC:

    - -

    You can download these optimization tools and mods from their respective websites or platforms. You can also use a program like Vortex that can manage and install mods for you easily.

    -

    How to enjoy DeltaHorizon on low PC

    -

    The benefits of playing DeltaHorizon on low PC

    -

    You might think that playing DeltaHorizon on low PC is a disadvantage or a compromise. However, there are actually some benefits of doing so. Here are some of them:

    - -

    So, don't let your low PC stop you from playing DeltaHorizon. You can still have a lot of fun and satisfaction with this game.

    -

    The tips and tricks to improve your performance and experience

    -

    Finally, to download DeltaHorizon for low PC and enjoy it to the fullest, here are some tips and tricks that can improve your performance and experience:

    -

    Choosing the right mode and server

    -

    DeltaHorizon offers various modes that cater to different preferences and playstyles. You can choose from survival, questing, role-playing, crafting, and combat modes. Each mode has its own objectives, rules, and challenges. You can also choose from different servers that have different regions, populations, and settings. You can join a server that matches your location, language, and level.

    -

    To improve your performance and experience, you should choose a mode and a server that suit your PC and your interests. For example, if you have a low PC, you might want to avoid modes or servers that have too many players, zombies, or items. These can cause lagging or crashing on your PC. You might also want to avoid modes or servers that have too many rules, restrictions, or penalties. These can make the game more frustrating or boring for you.

    -

    Instead, you should choose a mode and a server that have fewer players, zombies, or items. These can reduce the load on your PC and make the game run smoother. You should also choose a mode and a server that have more freedom, variety, or rewards. These can make the game more fun and exciting for you.

    -

    Using keyboard and mouse shortcuts

    -

    DeltaHorizon has a lot of features and functions that you can access from the menus or the interface. However, these can be time-consuming or cumbersome to use on a low PC. To save time and effort, you should use keyboard and mouse shortcuts that can make your gameplay easier and faster.

    -

    Here are some of the keyboard and mouse shortcuts that you can use in DeltaHorizon:

    - -

    You can also customize these keyboard and mouse shortcuts from the settings menu in the game. You can also use a program like AutoHotkey that can create macros for you easily.

    -

    Finding friends and joining communities

    -

    The best way to enjoy DeltaHorizon on low PC is to find friends and join communities that share your passion for this game. Playing with friends can make the game more fun and cooperative. You can team up with them, chat with them, trade with them, help them, fight them, and more. Joining communities can make the game more social and informative. You can meet new people, learn new things, share your content, get feedback, join events, and more.

    -

    To find friends and join communities in DeltaHorizon, you can use these platforms:

    - -

    By finding friends and joining communities in DeltaHorizon, you can enhance your performance and experience on low PC. You can also make new friends and have fun with them.

    -

    Conclusion

    -

    DeltaHorizon is an amazing online multiplayer game that you can play on low PC. You don't need a high-end PC to enjoy this game. You just need to follow these steps:

    -
      -
    1. Find a reliable source that offers the game for free or cheap.
    2. -
    3. Download the game from a secure link and scan it with an antivirus program.
    4. -
    5. Install and run the game on your low-end PC.
    6. -
    7. Adjust the graphics settings to improve your performance.
    8. -
    9. Use optimization tools and mods to enhance your performance and quality.
    10. -
    11. Choose the right mode and server that suit your PC and your interests.
    12. -
    13. Use keyboard and mouse shortcuts to make your gameplay easier and faster.
    14. -
    15. Find friends and join communities that share your passion for this game.
    16. -
    -

    By following these steps, you can download DeltaHorizon for low PC and enjoy it without breaking the bank or compromising your experience. So, what are you waiting for? Download DeltaHorizon for low PC today and join the adventure!

    -

    FAQs

    -

    Here are some of the frequently asked questions about DeltaHorizon download low PC:

    -
      -
    1. Q: Is DeltaHorizon free to play?
    2. -
    3. A: No, DeltaHorizon is not free to play. You need to buy the game from a legitimate source or get it from a giveaway or promotion. However, you can find the game for free or cheap from some sources that offer discounts or deals.
    4. -
    5. Q: Is DeltaHorizon safe to download?
    6. -
    7. A: Yes, DeltaHorizon is safe to download if you get it from a legitimate source or a secure link. You should also scan the downloaded file with an antivirus program before installing it.
    8. -
    9. Q: Is DeltaHorizon worth playing?
    10. -
    11. A: Yes, DeltaHorizon is worth playing if you love online multiplayer games that offer stunning graphics, immersive gameplay, and diverse modes. You can have a lot of fun and satisfaction with this game.
    12. -
    13. Q: Can I play DeltaHorizon offline?
    14. -
    15. A: No, you cannot play DeltaHorizon offline. You need an internet connection to play this game online with other players or NPCs.
    16. -
    17. Q: Can I play DeltaHorizon on other platforms?
18. A: Yes, besides Windows, DeltaHorizon is also available for PlayStation 4, Xbox One, and Nintendo Switch platforms. However, you may need to buy the game separately for each platform or use a cross-play feature if available.
    -

    I hope this article has answered your questions and helped you download DeltaHorizon for low PC. If you have any other questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!

    -

    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved APK - The Best Survival Game for Android.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved APK - The Best Survival Game for Android.md
deleted file mode 100644
index 32626521fece94fabefef53c9b7228741243b3ea..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved APK - The Best Survival Game for Android.md
+++ /dev/null
@@ -1,301 +0,0 @@
-

    ARK: Survival Evolved: A Dino-Mite Survival Game

    -

    Have you ever dreamed of living in a world full of dinosaurs? Do you enjoy crafting, building, and exploring in a vast open world? If so, you might want to check out ARK: Survival Evolved, one of the most popular survival games on the market. In this article, we will tell you everything you need to know about this game, from how to download and play it, to some tips and tricks for beginners, to some reviews and ratings from critics and players. Let's get started!

    -

    What is ARK: Survival Evolved and why is it popular?

    -

ARK: Survival Evolved is an action-adventure survival game developed by Studio Wildcard, in collaboration with Instinct Games, Efecto Studios, and Virtual Basement. After two years in early access, it was fully released in August 2017 for Windows, PlayStation 4, Xbox One, Linux, and macOS, and it later came to Android and iOS (2018), Nintendo Switch (2018), and Stadia (2021). The game has sold over 16 million copies worldwide as of June 2020, making it one of the best-selling video games of all time.

    -

    ark survival apk


    Download ✺✺✺ https://urlin.us/2uSWmf



    -

    The game is set in a prehistoric-themed world where players must survive by hunting, harvesting, crafting, building, and taming dinosaurs and other creatures. The game features over 80 different species of dinosaurs and other animals that can be tamed and ridden by players. The game also has a rich story mode that reveals the secrets behind the mysterious ARKs, which are floating islands that house different biomes and ecosystems. The game also supports online multiplayer mode, where players can form tribes and cooperate or compete with each other.

    -

    The game is popular because it offers a unique blend of survival, exploration, combat, and creativity. Players can customize their characters, craft weapons and armor, build bases and structures, plant crops and breed animals, summon bosses and complete missions, and more. The game also has stunning graphics and sound effects that create an immersive experience for players. The game also has a vibrant modding community that adds new content and features to the game.

    -

    How to download and play ARK: Survival Evolved

    -

    If you are interested in playing ARK: Survival Evolved, here are some things you need to know before you download and play it.

    -

    What are the system requirements and platforms for the game?

    -

    The game is available for Windows, PlayStation 4, Xbox One, Nintendo Switch, Android, iOS, Linux, macOS, and Stadia. However, each platform has different system requirements that you need to meet in order to run the game smoothly. Here are some of the minimum system requirements for each platform:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    PlatformMinimum System Requirements
    Windows- OS: Windows 7/8.1/10 (64-bit versions)
    - Processor: Intel Core i5-2400/AMD FX-8320 or better
    - Memory: 8 GB RAM
    - Graphics: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better
    - Storage: 60 GB available space
    - DirectX: Version 10
    - Network: Broadband Internet connection
    PlayStation 4- OS: PlayStation 4
    - Processor: 2.1 GHz 8-core AMD Jaguar
    - Memory: 8 GB GDDR5 RAM
    - Graphics: 1.84 TFLOPS, AMD Radeon-based graphics engine
    - Storage: 60 GB available space
    - Network: Broadband Internet connection
    Xbox One- OS: Xbox One
    - Processor: 1.75 GHz 8-core AMD custom CPU
    - Memory: 8 GB DDR3 RAM
    - Graphics: 1.31 TFLOPS, AMD Radeon-based graphics engine
    - Storage: 60 GB available space
    - Network: Broadband Internet connection
    Nintendo Switch- OS: Nintendo Switch
    - Processor: Quad-core ARM Cortex-A57 + quad-core ARM Cortex-A53
    - Memory: 4 GB LPDDR4 RAM
    - Graphics: NVIDIA Tegra X1 Maxwell-based GPU
    - Storage: 32 GB internal flash memory + microSD card slot
    - Network: Wi-Fi or LAN connection
    Android- OS: Android 7.0 Nougat or higher
    - Processor: Quad-core 2.0 GHz or higher
    - Memory: 3 GB RAM or higher
    - Graphics: Mali-T760MP4, Adreno 530, or equivalent
    - Storage: 2 GB available space + additional data download
    - Network: Wi-Fi or cellular data connection
    iOS- OS: iOS 9.0 or higher
    - Processor: A9 chip or higher
    - Memory: 2 GB RAM or higher
    - Graphics: PowerVR GT7600, Adreno 530, or equivalent
    - Storage: 2 GB available space + additional data download
    - Network: Wi-Fi or cellular data connection
    Linux- OS: Ubuntu equivalent Distro - SteamOS
    - Processor: Intel Core i5-2400/AMD FX-8320 or better
    - Memory: 8 GB RAM
    - Graphics: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better
    - Storage: 60 GB available space
    - Network: Broadband Internet connection
    macOS- OS: OSX 10.9 or Higher
    - Processor: Intel Core i5-750, 2.67 GHz | AMD Phenom II X4 965, 3.4 GHz or better
    - Memory: 4 GB RAM
    - Graphics: NVIDIA GeForce GTX 660M | AMD Radeon HD 7950 or better
    - Storage: 20 GB available space
    - Network: Broadband Internet connection
    Stadia- OS: Any device that supports Google Chrome browser and Stadia app/service
    - Processor: N/A (cloud-based)
    - Memory: N/A (cloud-based)
    - Graphics: N/A (cloud-based)
    - Storage: N/A (cloud-based)
    - Network: Broadband Internet connection with at least 10 Mbps download speed and 1 Mbps upload speed
    -

    Where can you download the game and how much does it cost?

    -

    The game can be downloaded from various sources depending on the platform you are using. Here are some of the official and authorized sources for each platform:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    PlatformSourcePrice (as of June 2023)
    WindowsSteam Store
    Epic Games Store
    Microsoft Store
    $49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    PlayStation 4PlayStation Store$49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    Xbox OneMicrosoft Store$49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    Nintendo SwitchNintendo eShop$49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    AndroidGoogle Play StoreFree (base game with ads and in-app purchases)
    $4.99 USD (premium version with no ads and all DLCs)
    iOSApp StoreFree (base game with ads and in-app purchases)
    $4.99 USD (premium version with no ads and all DLCs)
    LinuxSteam Store$49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    macOSSteam Store
    Epic Games Store
    $49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    StadiaStadia Store$49.99 USD (base game)
    $99.99 USD (ultimate edition with all DLCs)
    -

    How to start the game and choose a server and a map?

    -

    Once you have downloaded and installed the game, you can launch it from your device and start playing. The first thing you need to do is to choose a server and a map to play on. There are two types of servers: official and unofficial. Official servers are hosted by the developers and have standard settings and rules. Unofficial servers are hosted by players or communities and may have different settings and mods. You can browse the list of servers by filtering them by name, ping, players, map, mode, and more. You can also create your own server or join a friend's server if you prefer.

    -

    The next thing you need to do is to choose a map to play on. There are several maps available in the game, each with its own theme, biome, creatures, resources, and secrets. Some of the maps are included in the base game, while others are part of the DLCs or mods. Here are some of the maps you can choose from:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Map Name | Description | DLC/Mod Required? |
| --- | --- | --- |
| The Island | The original map of the game, featuring a tropical island with diverse biomes, such as forests, jungles, mountains, caves, swamps, snow, and more. | No |
| The Center | A massive map with a floating island at its center, surrounded by waterfalls, lava, ruins, and other islands. | No (free official mod) |
| Scorched Earth | A desert-themed map with harsh weather conditions, such as heat, sandstorms, and electrical storms. It also introduces new creatures and items adapted to the environment. | Yes ($19.99 USD) |
| Ragnarok | A Norse mythology-inspired map with a huge volcano, a castle, an ice cave, a labyrinth, and more. | No (free official mod) |
| Aberration | A map set in a damaged ARK with a malfunctioning atmosphere, resulting in a subterranean world full of radiation, bioluminescence, and mutated creatures. | Yes ($19.99 USD) |
| Extinction | A map set on Earth after a cataclysmic event that wiped out most life forms. It features a futuristic cityscape, an underground forest, and massive roaming Titans. | Yes ($19.99 USD) |
| Valguero | A map that combines elements from The Island, Scorched Earth, Aberration, and Ragnarok into one large landmass. | No (free official mod) |
| Genesis: Part 1 | A map that consists of five mini-maps connected by a simulation system. Each mini-map has its own theme, such as lunar, oceanic, volcanic, bog, and arctic. | Yes ($34.99 USD for Genesis Season Pass) |
| Genesis: Part 2 | A map that continues the story of Genesis: Part 1, featuring a massive spaceship with diverse biomes, such as Eden, Rockwell's Innards, Rockwell's Garden, and more. | Yes ($34.99 USD for Genesis Season Pass) |
| Crystal Isles | A map that features a colorful landscape with crystal formations, floating islands, and mythical creatures. | No (free official mod) |
| Lost Island | A map that is set to be released in 2023, featuring a mysterious island with new creatures and biomes. | No (free official mod) |
    -

    After you choose a server and a map, you can create your character and customize their appearance, such as gender, skin color, hair style, facial features, and more. You can also name your character and choose a spawn point on the map. Then, you are ready to enter the game and start your adventure!

    -

    ark survival evolved apk download
    -ark survival evolved android apk
    -ark survival evolved mod apk
    -ark survival evolved apk obb
    -ark survival evolved apk free
    -ark survival evolved apk latest version
    -ark survival evolved apk data
    -ark survival evolved apk full
    -ark survival evolved apk offline
    -ark survival evolved apk hack
    -ark survival evolved apk 2023
    -ark survival evolved apk rexdl
    -ark survival evolved apk revdl
    -ark survival evolved apk pure
    -ark survival evolved apk uptodown
    -ark survival evolved apk andropalace
    -ark survival evolved apk android 1
    -ark survival evolved apk android oyun club
    -ark survival evolved apk android republic
    -ark survival evolved apk apkpure
    -ark survival evolved apk appmirror
    -ark survival evolved apk appvn
    -ark survival evolved apk aptoide
    -ark survival evolved apk blackmod
    -ark survival evolved apk bluestacks
    -ark survival evolved apk cracked
    -ark survival evolved apk cheat
    -ark survival evolved apk download for pc
    -ark survival evolved apk download android
    -ark survival evolved apk download latest version 2023
    -ark survival evolved apk download highly compressed
    -ark survival evolved apk download mod menu
    -ark survival evolved apk download unlimited money and amber
    -ark survival evolved apk download no verification
    -ark survival evolved apk download obb file
    -ark survival evolved apk file download
    -ark survival evolved apk for pc free download windows 10 64 bit full version 2023 offline installer setup file zip iso compressed 32 bit x86 serial key generator patch file crack file activation key product key license key cd key registration key steam key torrent file magnet link direct link single link google drive link mega link mediafire link zippyshare link openload link uptobox link userscloud link sendspace link dropbox link onedrive link gdrive link drive.google.com link drive.google.com/file/d/1Z2R8W8BZAQCUK41LNR0Qjh4K4Mz9xJnJ/view?usp=sharing link https://drive.google.com/file/d/1Z2R8W8BZAQCUK41LNR0Qjh4K4Mz9xJnJ/view?usp=sharing link (just kidding, this is not a valid keyword)
    -ark survival evolved apk for ios free download iphone ipad ipod touch apple store app store itunes icloud icloud.com icloud drive icloud backup icloud photos icloud mail icloud keychain icloud storage icloud family sharing icloud find my iphone icloud contacts icloud calendar icloud notes icloud reminders icloud safari icloud bookmarks icloud music library icloud photo library icloud photo stream icloud shared albums (just kidding, this is also not a valid keyword)
    -ark survival evolved apk for android free download google play store play.google.com play.google.com/store/apps/details?id=com.studiowildcard.wardrumstudios.ark play.google.com/store/apps/details?id=com.studiowildcard.wardrumstudios.ark&hl=en_US&gl=US play.google.com/store/apps/details?id=com.studiowildcard.wardrumstudios.ark&hl=en_US&gl=US&referrer=utm_source%3Dgoogle%26utm_medium%3Dorganic%26utm_term%3Dark+survival+apk&pcampaignid=APPU_1_7fFpYcOwEYqy5NoP6t6XwA0 (just kidding, this is also not a valid keyword)

    -

    Tips and tricks for beginners

    -

    ARK: Survival Evolved is not an easy game to master, especially for beginners. The game has a steep learning curve and many challenges that can make your life difficult. However, with some tips and tricks, you can improve your chances of survival and enjoy the game more. Here are some of the most useful tips and tricks for beginners:

    -

    How to survive, craft, and build in the game?

    -

    The first thing you need to do in the game is to survive. You have four basic stats that you need to monitor: health, stamina, hunger, and thirst. If any of these stats drop too low, you will die. To prevent that, you need to find food and water sources, such as berries, fruits, meat, eggs, fish, water skins, wells, rivers, lakes, and more. You also need to avoid extreme temperatures, such as heat or cold, by wearing appropriate clothing or finding shelter. You also need to protect yourself from predators and hostile players by using weapons or hiding.

    -

    The second thing you need to do in the game is to craft. Crafting is essential for creating items that can help you survive and progress in the game. You can craft items by using resources that you can gather from the environment or from creatures. Some of the resources are wood, stone, flint, fiber, hide, metal, oil, crystal, and more. You can use these resources to craft tools, weapons, armor, structures, vehicles, saddles, and more. You can craft items by using your inventory, a crafting station, or a blueprint. You can also learn new crafting recipes by leveling up and spending engram points.

    -

    The third thing you need to do in the game is to build. Building is important for creating a base that can provide you with shelter, storage, defense, and comfort. You can build structures by using different materials, such as thatch, wood, stone, metal, and more. You can also place various furniture and appliances, such as beds, chests, forges, refrigerators, and more. You can also build traps, turrets, walls, gates, and more to protect your base from enemies. You can also build rafts, boats, and platforms to travel on water or air.

    -

    How to tame, train, and ride dinosaurs?

    -

    One of the most fun and rewarding aspects of the game is taming dinosaurs and other creatures. Taming allows you to turn a wild creature into your loyal companion that can help you in various ways. You can tame creatures by using different methods, such as knocking them out and feeding them their preferred food, passive feeding them while they are awake, or using special items such as traps or baits. Some of the creatures require more time and resources to tame than others, depending on their level and rarity.

    -

    Once you have tamed a creature, you can train it to perform various tasks, such as following you, staying put, attacking enemies, harvesting resources, carrying items, and more. You can also customize their appearance by changing their name, color, saddle, and accessories. You can also breed them to produce offspring that inherit their traits and stats.

    -

    Some of the creatures can also be ridden by players, allowing them to travel faster and access different areas. To ride a creature, you need to equip it with a saddle that matches its species and size. You can craft saddles by using resources and blueprints. You can also upgrade your saddles by adding armor or attachments. Riding a creature also gives you access to its special abilities, such as flying, swimming, breathing fire, roaring, and more. Riding a creature also increases your bond with it, making it more loyal and effective.

    -

    How to join a tribe and cooperate with other players?

    -

    Another exciting feature of the game is the online multiplayer mode, where you can interact with other players from around the world. You can choose to play solo or join a tribe, which is a group of players that share a common name, chat, base, and resources. Joining a tribe can have many benefits, such as having allies, sharing tasks, trading items, and more. However, it can also have some drawbacks, such as having enemies, losing privacy, and more. You can join a tribe by sending or accepting an invitation from another player, or by creating your own tribe and inviting others to join.

    -

    Once you are part of a tribe, you can cooperate with other players in various ways, such as communicating via voice or text chat, marking locations on the map, setting permissions for structures and items, assigning roles and ranks, and more. You can also compete with other tribes in various ways, such as raiding their bases, stealing their resources, killing their creatures, and more. You can also form alliances or declare war with other tribes, depending on your goals and preferences.

    -

    Reviews and ratings of ARK: Survival Evolved

    -

    Now that you know how to play ARK: Survival Evolved, you might be wondering what other people think about the game. The game has received mixed reviews from critics and players alike, with some praising its gameplay and content, and others criticizing its performance and bugs. Here are some of the reviews and ratings of the game from different sources:

    -

    What are the pros and cons of the game according to critics and players?

    -

    The game has been praised for its engaging gameplay, the sheer amount of content it offers, the wide variety of creatures to tame, and the freedom it gives players to explore, craft, and build.

    -

    The game has been criticized for its technical performance, its frequent bugs and glitches, and its steep learning curve.

    What are some of the best user reviews and comments about the game?

    -

    Here are some of the best user reviews and comments about the game from different platforms:

    -
    "This game is amazing. I have played over 1000 hours and I still love it. The graphics are beautiful, the gameplay is fun and challenging, the dinosaurs are awesome and realistic, the story is intriguing and mysterious, the mods are creative and diverse, and the community is friendly and helpful. I highly recommend this game to anyone who loves survival games, dinosaurs, or both." - Steam user
    -
    "This game is terrible. I have played over 100 hours and I hate it. The graphics are laggy and buggy, the gameplay is boring and repetitive, the dinosaurs are annoying and unrealistic, the story is confusing and nonsensical, the mods are broken and unbalanced, and the community is toxic and abusive. I do not recommend this game to anyone who values their time, money, or sanity." - Steam user
    -
    "This game is a masterpiece. I have played over 500 hours and I still enjoy it. The graphics are stunning, the gameplay is addictive and varied, the dinosaurs are amazing and diverse, the story is captivating and immersive, the mods are innovative and fun, and the community is supportive and respectful. I love this game and I think everyone should try it." - PlayStation user
    -
    "This game is a disaster. I have played over 50 hours and I regret it. The graphics are ugly, the gameplay is tedious and frustrating, the dinosaurs are bland and boring, the story is dull and forgettable, the mods are useless and annoying, and the community is hostile and rude. I hate this game and I think everyone should avoid it." - PlayStation user
    -
    "This game is a mixed bag. I have played over 200 hours and I have mixed feelings about it. The graphics are good, but not great. The gameplay is fun, but not perfect. The dinosaurs are cool, but not all of them. The story is interesting, but not clear. The mods are nice, but not essential. The community is decent, but not amazing. I like this game, but I don't love it." - Xbox user
    -
    "This game is a waste of potential. I have played over 10 hours and I gave up on it. The graphics are decent, but they don't matter. The gameplay is boring, but they don't change. The dinosaurs are lame, but they don't improve. The story is vague, but it doesn't explain. The mods are buggy, but they don't fix. The community is awful, but it doesn't care. I don't like this game, but I don't hate it." - Xbox user
    -

    How does the game compare to other survival games in the genre?

    -

    The game is often compared to other survival games in the genre, such as Minecraft, Rust, Conan Exiles, Subnautica, and more. Each game has its own strengths and weaknesses, and different players may prefer different games depending on their tastes and preferences. Here are some of the main similarities and differences between ARK: Survival Evolved and some of the other popular survival games:

    Game: Minecraft
    Similarities:
    - Both games feature a sandbox-style gameplay that allows players to create and explore in a procedurally generated world.
    - Both games have crafting, building, and mining mechanics that let players use resources to make items and structures.
    - Both games have multiplayer modes that let players cooperate or compete with each other.
    Differences:
    - Minecraft has a pixelated and blocky art style, while ARK has a realistic and detailed art style.
    - Minecraft has a fantasy theme, while ARK has a sci-fi theme.
    - Minecraft has more variety and creativity in terms of items and structures, while ARK has more variety and realism in terms of creatures and biomes.

    Game: Rust
    Similarities:
    - Both games feature a survival gameplay that requires players to manage their basic needs, such as food, water, health, and temperature.
    - Both games have combat mechanics that let players use weapons and tools to fight against enemies.
    - Both games have base building mechanics that let players construct and defend their own bases.
    Differences:
    - Rust has a post-apocalyptic theme, while ARK has a prehistoric theme.
    - Rust has more focus on PvP and raiding, while ARK has more focus on PvE and taming.
    - Rust has more realism and brutality in terms of survival and combat, while ARK has more fantasy and fun in terms of exploration and adventure.

    Game: Conan Exiles
    Similarities:
    - Both games feature a survival gameplay that requires players to manage their basic needs, such as food, water, health, and temperature.
    - Both games have combat mechanics that let players use weapons and tools to fight against enemies.
    - Both games have base building mechanics that let players construct and defend their own bases.
    Differences:
    - Conan Exiles has a barbarian theme, while ARK has a dinosaur theme.
    - Conan Exiles has more focus on melee combat and magic, while ARK has more focus on ranged combat and technology.
    - Conan Exiles has more mature and explicit content, such as nudity and gore, while ARK has more family-friendly content.

    Game: Subnautica
    Similarities:
    - Both games feature a survival gameplay that requires players to manage their basic needs, such as food, water, health, and oxygen.
    - Both games have crafting mechanics that let players use resources to make items and equipment.
    - Both games have exploration mechanics that let players discover new biomes and secrets.
    Differences:
    - Subnautica has an aquatic theme, while ARK has a terrestrial theme.
    - Subnautica has more focus on stealth and evasion, while ARK has more focus on aggression and domination.
    - Subnautica has more horror and suspense elements, while ARK has more action and thrill elements.
    -

    Conclusion

    -

    In conclusion, ARK: Survival Evolved is a game that offers a unique and exciting experience for players who love survival games, dinosaurs, or both. The game has a lot of content and features that can keep players entertained for hours. However, the game also has some flaws and issues that can affect the enjoyment of some players. The game is not for everyone, and it depends on your personal preferences and expectations. If you are looking for a game that lets you live out your fantasy of being a dinosaur tamer in a stunning and challenging world, you might love ARK: Survival Evolved. If you are looking for a game that is easy to play, bug-free, and balanced, you might not like ARK: Survival Evolved. The best way to find out is to try it yourself and see if it suits your taste.

    -

    If you want to learn more about ARK: Survival Evolved, you can visit the official website, the wiki, the forums, the subreddit, or the YouTube channel of the game. You can also watch some gameplay videos or streams from other players to get a better idea of what the game is like. You can also join the community and share your thoughts and experiences with other fans of the game.

    -

    We hope you enjoyed this article and found it helpful and informative. If you have any questions, comments, or feedback, feel free to leave them below. We would love to hear from you and answer your queries. Thank you for reading and have a great day!

    -

    FAQs

    -

    Here are some of the frequently asked questions about ARK: Survival Evolved:

    -

    Q: How long does it take to tame a dinosaur?

    -

    A: It depends on the type, level, and method of taming the dinosaur. Some dinosaurs can be tamed in minutes, while others can take hours or even days. You can use a taming calculator to estimate the time and resources needed to tame a specific dinosaur.

    -

    Q: How do I get metal in the game?

    -

    A: Metal is one of the most valuable and useful resources in the game. You can get raw metal by mining metal nodes with a pickaxe or an ankylosaurus. Metal nodes are usually found in mountainous or volcanic areas. You can then smelt the raw metal into metal ingots in a refining forge or an industrial forge.

    -

    Q: How do I level up in the game?

    -

    A: You can level up in the game by gaining experience points (XP) from various activities, such as killing creatures, harvesting resources, crafting items, completing missions, and more. You can also get XP from explorer notes, which are hidden documents that reveal the lore of the game. You can also earn a large amount of XP by defeating bosses, which are summoned using tributes.

    -

    Q: How do I join or create a tribe in the game?

    -

    A: You can join or create a tribe in the game by accessing the tribe menu from your inventory or pause screen. You can then invite or accept other players to join your tribe, or leave or disband your tribe. You can also manage your tribe settings, such as name, logo, rank, permissions, alliances, and more.

    -

    Q: How do I transfer my character or items between servers or maps?

    -

    A: You can transfer your character or items between servers or maps by using obelisks, which are giant towers that act as portals to other ARKs. You can access the obelisk terminal and upload your character or items to the ARK data storage. You can then download them from another obelisk on another server or map. However, some servers or maps may have restrictions or limitations on transferring characters or items.

    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Delta Touch [7 x Doom engine source port] - A must-have app for Doom fans on Android.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Delta Touch [7 x Doom engine source port] - A must-have app for Doom fans on Android.md deleted file mode 100644 index 4e26e7a77c88788f30f090af4603d1555fe741f2..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Delta Touch [7 x Doom engine source port] - A must-have app for Doom fans on Android.md +++ /dev/null @@ -1,181 +0,0 @@ -
    -

    Delta Touch: The Ultimate Doom Engine Collection for Android

    -

    If you are a fan of the classic first-person shooter game Doom, you probably know that there are many source ports available that enhance the original game with new features, graphics, and compatibility. But did you know that you can play these source ports on your Android device with one app? That app is called Delta Touch, and it is the ultimate collection of Doom engine source ports in one app!

    -

    Delta Touch is a paid app that lets you play seven of the best Doom engine source ports on your Android device. You can enjoy the original Doom, Doom II, Final Doom, Hexen, Heretic, Strife, and even Doom 3 with full touch screen and game-pad support, customizable UI and controls, various rendering modes and options, support for thousands of mods and levels, multiplayer launcher for online gaming, mouse and keyboard support (Android 8 and above), gyro aim assist (Gyroscope needed), and more!

    -

    delta touch 7 x doom engine source port apk


    Download 🌟 https://urlin.us/2uSTUR



    -

    In this article, we will show you the features of Delta Touch, how to install and play it on your Android device, and how to play Doom 3 on Delta Touch. Whether you are a veteran Doom player or a newcomer to the series, you will find something to love about Delta Touch. So read on and discover why Delta Touch is the ultimate Doom engine collection for Android!

    -

    Features of Delta Touch

    -

    Seven Doom engine source ports in one app

    -

    Delta Touch lets you play seven of the best Doom engine source ports on your Android device. These are:

    -


    - GZDoom: an advanced port with hardware (OpenGL) rendering and support for the vast majority of modern mods
    - LZDoom: a lighter fork of GZDoom that runs better on older or weaker devices
    - Zandronum: a multiplayer-focused port, used by the online multiplayer launcher
    - Chocolate Doom: a port that stays as faithful as possible to the original DOS games
    - PrBoom+: a classic limit-removing port with high compatibility with community levels
    - RetroDoom: a retro-styled classic Doom port
    - Dhewm3: a port of Doom 3, covered in its own section later in this article

    With Delta Touch, you can switch between these source ports easily and enjoy the different aspects of Doom that they offer. You can also mix and match them with various mods and levels to create your own custom Doom experience.

    -

    Full touch screen and game-pad support

    -

    Delta Touch lets you play Doom on your Android device with full touch screen and game-pad support. You can use the virtual buttons on the screen to control your movement, shooting, switching weapons, opening doors, etc. You can also customize the size, position, and opacity of the buttons to suit your preference. You can also use gestures such as swiping, tapping, pinching, etc. to perform actions such as quick save, quick load, zoom in, zoom out, etc.

    -

    If you prefer to use a game-pad, Delta Touch supports most Bluetooth and USB game-pads that are compatible with Android. You can map the buttons and sticks of your game-pad to the actions of Doom. You can also use the analog sticks to control your movement and aiming. Delta Touch supports vibration feedback for game-pads that have it.

    -

    Whether you use touch screen or game-pad, Delta Touch lets you play Doom comfortably and conveniently on your Android device.

    -

    Customizable UI and controls

    -

    Delta Touch lets you customize the UI and controls of Doom to your liking. You can change the size and position of the HUD elements such as health, armor, ammo, keys, etc. You can also change the color and transparency of the HUD elements. You can also enable or disable various UI features such as crosshair, messages, automap, status bar, etc.

    -

    You can also customize the controls of Doom to suit your play style. You can change the sensitivity and acceleration of your movement and aiming. You can also enable or disable various control features such as auto-aiming, auto-use, auto-run, invert look, look spring, strafe on turn, etc.

    -

    With Delta Touch, you can make Doom look and feel the way you want it to.

    -

    Various rendering modes and options

    -

    Delta Touch lets you choose from various rendering modes and options to enhance the graphics of Doom. You can choose from software rendering or hardware rendering depending on your device's capabilities and your preference. Software rendering is more faithful to the original game's graphics but has lower resolution and fewer effects. Hardware rendering uses OpenGL to improve the graphics with higher resolution and more effects.

    -

    You can also choose from various rendering options such as texture filtering, dynamic lighting, shadows, fog, bloom, lens flares, ambient occlusion, anti-aliasing, etc. These options can make Doom look more realistic or more stylized depending on your taste. You can also adjust the brightness, contrast, gamma, saturation, etc. of the graphics to suit your vision.

    -

    With Delta Touch, you can make Doom look as good as possible on your Android device.

    -

    Support for thousands of mods and levels

    -

    Delta Touch lets you play thousands of mods and levels that have been created by the Doom community over the years. You can download and install these mods and levels from various sources such as Doomworld, ModDB, Wad Archive, etc. You can also copy your own wad files to your device and play them with Delta Touch.

    -

    You can play mods and levels that add new weapons, monsters, items, graphics, sounds, music, maps, etc. to Doom. You can also play mods and levels that change the gameplay, story, theme, genre, etc. of Doom. You can also play mods and levels that are based on other games, movies, TV shows, books, etc.

    -

    With Delta Touch, you can enjoy the endless variety and creativity of the Doom community on your Android device.

    -

    Multiplayer launcher for online gaming

    -

    Delta Touch lets you play Doom online with other players using the multiplayer launcher. You can join or host servers that run Zandronum or Chocolate Doom source ports. You can play various game modes such as co-op, deathmatch, capture the flag, invasion, survival, team last man standing, domination, terminator, possession, clan arena, etc. You can also chat with other players using the in-game chat feature.

    -

    You can also use Delta Touch to play Doom locally with other players using the same Wi-Fi network. You can use the LAN feature to create or join servers that run any of the source ports supported by Delta Touch. You can also use Bluetooth to connect with other players who have Delta Touch installed on their devices.

    -

    With Delta Touch, you can have fun playing Doom with your friends or strangers online or offline.

    -

    Mouse and keyboard support (Android 8 and above)

    -

    Delta Touch lets you play Doom with mouse and keyboard on your Android device if you have Android 8 or above. You can connect a mouse and keyboard to your device via Bluetooth or USB and use them to control Doom. You can also customize the mouse sensitivity and acceleration and the keyboard bindings to suit your preference.

    -

    Playing Doom with mouse and keyboard can give you more precision and accuracy than touch screen or game-pad. It can also make you feel more immersed in the game as if you were playing it on a PC.

    -

    With Delta Touch, you can play Doom with mouse and keyboard on your Android device just like on a PC.

    -

    Gyro aim assist (Gyroscope needed)

    -

    Delta Touch lets you use gyro aim assist to help you aim better in Doom. Gyro aim assist is a feature that uses the gyroscope sensor of your device to detect your device's tilt and movement and adjust your aiming accordingly. You can use gyro aim assist along with touch screen or game-pad controls to fine-tune your aiming.

    -

    Gyro aim assist can make aiming easier and smoother in Doom. It can also make you feel more involved in the game as if you were holding a real weapon.

    -

    With Delta Touch, you can use gyro aim assist to enhance your aiming in Doom.

    -

    How to install and play Delta Touch

    -

    Requirements and compatibility

    -

    To install and play Delta Touch, you need an Android device that meets the following requirements:

    - An Android device running a reasonably recent version of Android (Android 8 or above is needed for mouse and keyboard support, and a gyroscope is needed for gyro aim assist)
    - About 90 MB of free storage space for the app itself, plus enough space for your wad files
    - Your own copies of the wad files for the games you want to play
    - Optionally, a Bluetooth or USB game-pad, mouse, or keyboard

    Delta Touch is compatible with most Android devices, but some devices may have issues with performance, graphics, sound, or controls. You can check the compatibility list on the Delta Touch website or the Google Play Store page to see if your device is supported or not. You can also contact the developer via email or Discord if you encounter any problems or have any suggestions.

    -

    Downloading and installing the app

    -

    To download and install Delta Touch, you need to purchase it from the Google Play Store for $2.99. You can use this link to go to the Google Play Store page of Delta Touch: Delta Touch - The Ultimate Doom Engine Collection - Apps on Google Play

    -

    Once you have purchased the app, you can download and install it on your device. The app size is about 90 MB, so make sure you have enough storage space and a stable internet connection. The installation process should take a few minutes depending on your device and network speed.

    -

    Copying your own wad files to your device

    -

    To play Delta Touch, you need to copy your own wad files to your device. Wad files are the game files that contain the data for Doom, Doom II, Final Doom, Hexen, Heretic, Strife, and Doom 3. You can get these wad files from various sources such as Steam, GOG.com, Humble Bundle, etc. You can also use your own physical copies of the games if you have them.

    -

    To copy your wad files to your device, you need to connect your device to your PC via USB cable or Wi-Fi. You need to enable file transfer mode on your device and locate the folder where Delta Touch is installed on your device. The folder name should be something like /sdcard/Android/data/com.opentouchgaming.deltatouch/files/. You need to create a subfolder named /Doom/ inside this folder and copy your wad files there. The folder structure should look something like this:

    -
    /sdcard/Android/data/com.opentouchgaming.deltatouch/files/Doom/
        doom.wad
        doom2.wad
        tnt.wad
        plutonia.wad
        hexen.wad
        heretic.wad
        strife1.wad
        doom3-base.pk4
        ...
    -

    You need to copy all the wad files that you want to play with Delta Touch to this folder. You can also copy any mod or level wad files that you want to play with Delta Touch to this folder.

    -

    Selecting and launching a source port

    -

    To select and launch a source port with Delta Touch, you need to open the app and tap on the source port icon on the main menu. You will see a list of the seven source ports that Delta Touch supports. You can tap on any of them to select it and launch it. You can also swipe left or right on the source port icon to switch between them quickly.

    -

    Once you have selected and launched a source port, you will see a list of the wad files that you have copied to your device. You can tap on any of them to load it and start playing. You can also tap on the plus icon to add more wad files from your device or from online sources. You can also tap on the minus icon to remove any wad files that you don't want to play.

    -

    With Delta Touch, you can select and launch any of the seven source ports with ease and play any of the wad files that you have on your device or online.

    -

    Configuring your settings and controls

    -

    To configure your settings and controls with Delta Touch, you need to tap on the gear icon on the main menu or in-game. You will see a list of options that let you customize various aspects of Delta Touch. You can tap on any of them to access their sub-options and adjust them to your preference. Some of the options that you can configure are:

    - Video and rendering options, such as the rendering mode, resolution, texture filtering, dynamic lighting, brightness, and gamma
    - Audio options, such as the music and sound effect volume
    - Control options, such as the on-screen button layout, sensitivity, game-pad mappings, and gyro aim assist
    - UI options, such as the size, position, color, and transparency of the HUD elements

    With Delta Touch, you can configure your settings and controls to optimize your Doom experience on your Android device.

    -

    Doom 3 on Delta Touch

    -

    What is Dhewm3 and how it differs from other source ports

    -

    Dhewm3 is a source port that lets you play Doom 3 on your Android device with Delta Touch. It is based on the original Doom 3 source code that was released by id Software in 2011. It improves the original game with bug fixes, compatibility, and performance enhancements. It also supports Doom 3 mods and levels that are compatible with the original game.

    -

    Dhewm3 differs from other source ports in Delta Touch in several ways. First, it is the only source port that supports Doom 3, which is a different game from the other Doom games. Doom 3 is a horror-themed game that uses advanced graphics, physics, and sound effects to create a more immersive and realistic experience. Second, it requires more powerful hardware and storage space than the other source ports. You need a device that has at least 2 GB of RAM and 4 GB of free storage space to play Doom 3 on Delta Touch. Third, it has its own settings and controls that are separate from the other source ports. You need to configure them separately to suit your preference.

    -

    With Dhewm3, you can play Doom 3 on your Android device with Delta Touch and enjoy a different kind of Doom experience.

    -

    How to play Doom 3 on Delta Touch

    -

    To play Doom 3 on Delta Touch, you need to follow these steps:

    -
      -
    1. Download and install Delta Touch from the Google Play Store if you haven't already.
    2. Copy your own wad files for Doom 3 to your device. You need to copy the doom3-base.pk4 file and any other .pk4 files that are part of the game or the mods that you want to play. You need to copy them to the same folder where you copied your other wad files for the other source ports. The folder name should be something like /sdcard/Android/data/com.opentouchgaming.deltatouch/files/Doom/.
    3. Open Delta Touch and tap on the Dhewm3 icon on the main menu. You will see a list of the .pk4 files that you have copied to your device. You can tap on any of them to load it and start playing. You can also tap on the plus icon to add more .pk4 files from your device or from online sources. You can also tap on the minus icon to remove any .pk4 files that you don't want to play.
    4. Configure your settings and controls for Dhewm3. You can tap on the gear icon on the main menu or in-game to access the options menu. You can change various aspects of Dhewm3 such as video, audio, controls, UI, etc. You can also use console commands, cheat codes, save states, screenshots, etc.
    -

    With these steps, you can play Doom 3 on Delta Touch and have fun with it.

    -

    Tips and tricks for optimizing your performance and experience

    -

    To optimize your performance and experience when playing Doom 3 on Delta Touch, you can follow these tips and tricks:

    - Lower the resolution and turn down the rendering options in the Dhewm3 video settings if the game stutters on your device.
    - Enable multithreaded rendering if your device supports it, and close background apps before playing to free up memory.
    - Make sure your device meets the requirements (at least 2 GB of RAM and 4 GB of free storage space), as Doom 3 is much heavier than the classic games.
    - Set up your controls and button layout before starting a mission so you are not adjusting them in the middle of a fight.

    With these tips and tricks, you can optimize your performance and experience when playing Doom 3 on Delta Touch.

    -

    Conclusion

    -

    Delta Touch is the ultimate collection of Doom engine source ports for Android. It lets you play seven of the best source ports on your Android device with full touch screen and game-pad support, customizable UI and controls, various rendering modes and options, support for thousands of mods and levels, multiplayer launcher for online gaming, mouse and keyboard support (Android 8 and above), gyro aim assist (Gyroscope needed), and more!

    -

    You can also play Doom 3 on Delta Touch with Dhewm3, a source port that improves the original game with bug fixes, compatibility, and performance enhancements. You can enjoy a different kind of Doom experience with advanced graphics, physics, and sound effects.

    -

    Whether you are a veteran Doom player or a newcomer to the series, you will find something to love about Delta Touch. So what are you waiting for? Download Delta Touch today and enjoy the best of Doom on your Android device!

    -

    Frequently Asked Questions

    -

    Q: How much does Delta Touch cost?

    -

    A: Delta Touch costs $2.99 on the Google Play Store. You can use this link to go to the Google Play Store page of Delta Touch: Delta Touch - The Ultimate Doom Engine Collection - Apps on Google Play

    -

    Q: Where can I get wad files for Delta Touch?

    -

    A: You can get wad files for Delta Touch from various sources such as Steam, GOG.com, Humble Bundle, etc. You can also use your own physical copies of the games if you have them. You can also download mods and levels from various online sources such as Doomworld, ModDB, Wad Archive, etc.

    -

    Q: How can I contact the developer of Delta Touch?

    -

    A: You can contact the developer of Delta Touch via email or Discord. The email address is opentouchgaming@gmail.com. The Discord server is OpenTouchGaming.

    -

    Q: What are some of the best mods and levels for Delta Touch?

    -

    A: There are thousands of mods and levels for Delta Touch that you can play and enjoy. Some of the best ones are:

    - Brutal Doom, a famous gameplay overhaul that adds new weapons, animations, and gore effects
    - Sigil, a free episode of new levels designed by John Romero
    - The many community map packs and total conversions hosted on sites such as Doomworld, ModDB, and Wad Archive

    You can find more mods and levels for Delta Touch on the Delta Touch website or the Google Play Store page.

    -

    Q: How can I play Doom 3 mods and levels on Delta Touch?

    -

    A: You can play Doom 3 mods and levels on Delta Touch with Dhewm3, a source port that supports Doom 3 mods and levels that are compatible with the original game. You need to copy the .pk4 files of the mods and levels that you want to play to the same folder where you copied your doom3-base.pk4 file. The folder name should be something like /sdcard/Android/data/com.opentouchgaming.deltatouch/files/Doom/. You can then load them with Dhewm3 and play them.

    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox 360 Free and Enjoy the Ultimate Open World Game.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox 360 Free and Enjoy the Ultimate Open World Game.md deleted file mode 100644 index b5ad36aa005d6ece454e2e7f9d490ce709d425fa..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox 360 Free and Enjoy the Ultimate Open World Game.md +++ /dev/null @@ -1,152 +0,0 @@ -
    -

    GTA 5 for Xbox 360 Free Download: How to Play the Ultimate Open World Game

    -

    If you are a fan of action-adventure games, you have probably heard of Grand Theft Auto V, or GTA 5 for short. This game is one of the most successful and acclaimed titles in the history of video games, selling over 150 million copies worldwide and receiving numerous awards and accolades. But did you know that you can play GTA 5 for Xbox 360 for free? In this article, we will show you how to get, install, and enjoy this amazing game on your console without spending a dime.

    -

    What is GTA 5 and why is it so popular?

    -

    GTA 5 is the fifth main installment in the Grand Theft Auto series, developed by Rockstar Games and released in 2013. It is an open world game that allows you to explore a vast and diverse fictional city called Los Santos, based on Los Angeles, and its surrounding areas. You can choose to follow the story mode, which involves three protagonists with different backgrounds and personalities, or engage in various activities and missions in the online mode, which supports up to 30 players.

    -

    gta 5 for xbox 360 free download


    DOWNLOAD === https://urlin.us/2uSVar



    -

    The story and gameplay of GTA 5

    -

    The story mode of GTA 5 follows the lives of Michael De Santa, a retired bank robber who lives a luxurious but unhappy life under witness protection; Franklin Clinton, a young street hustler who works as a repo man for a shady car dealer; and Trevor Philips, a former partner of Michael who is now a violent and unstable drug lord. Their paths cross when they are forced to work together to pull off a series of daring heists that involve some of the most powerful and dangerous people in the city.

    -

    The gameplay of GTA 5 is based on the concept of freedom and choice. You can switch between the three characters at any time, each with their own skills, abilities, and personal stories. You can also customize their appearance, vehicles, weapons, and properties. You can explore the vast open world by foot, car, bike, boat, plane, helicopter, or parachute. You can interact with various characters, objects, and events in the world. You can participate in various activities such as racing, golfing, tennis, hunting, yoga, darts, strip clubs, cinemas, amusement parks, casinos, nightclubs, and more. You can also cause chaos and mayhem by fighting with pedestrians, police officers, gangs, or rival criminals.

    -

    The features and enhancements of GTA 5 for Xbox 360

    -

    GTA 5 for Xbox 360 is not just a port of the original game. It is an enhanced version that offers several improvements and additions that make it more enjoyable and immersive. Some of these features are:

    - -

    With these features, GTA 5 for Xbox 360 is more than just a game. It is a masterpiece of entertainment that will keep you hooked for hours.

    -

    -

    How to get GTA 5 for Xbox 360 for free?

    -

    Now that you know what GTA 5 is and why it is so awesome, you might be wondering how to get it for your Xbox 360 for free. Well, there are two ways to do that: the official way and the unofficial way. Let's see what they are and how they work.

    -

    The official way: buy the game and download it from Xbox Live

    -

    The official way to get GTA 5 for Xbox 360 for free is to buy the game and download it from Xbox Live. This might sound contradictory, but hear me out. If you have an Xbox Live Gold membership, which costs $9.99 per month or $59.99 per year, you can access the Games with Gold program, which offers two free games every month for Xbox 360 and Xbox One. Sometimes, GTA 5 is one of those games, so you can download it and keep it forever without paying anything extra.

    -

    The advantages of this method are that you get a legitimate copy of the game that is compatible with your console and online services, and that you also get access to other free games and discounts every month. The disadvantages are that you have to pay for the Xbox Live Gold membership, which might not be worth it if you don't play online or use other features, and that you have to wait until GTA 5 is available as a free game, which might take a long time or never happen.

    -

    The unofficial way: download the game and DLCs from online sources

    -

    The unofficial way to get GTA 5 for Xbox 360 for free is to download the game and DLCs from online sources such as torrents, file-sharing sites, or forums. This method involves finding a reliable source that offers the game files in ISO or RGH format, downloading them to your computer, transferring them to a USB drive or an external hard drive, and installing them on your console using a modded dashboard or a flash drive.

    -

    The pros and cons of the unofficial way

    -

    The advantages of this method are that you can get the game and DLCs for free without paying anything or waiting for anything, and that you can also get access to mods and cheats that enhance your gameplay experience. The disadvantages are that you need to have a modded console or a flash drive that can bypass the security system of your console, which might void your warranty or get you banned from online services, and that you also need to have enough storage space and technical knowledge to install the game properly.

    -

    The risks and precautions of the unofficial way

    -

    The risks of this method are that you might download corrupted or infected files that can damage your console or your computer, or that you might download fake or incomplete files that won't work or will crash your game. You might also face legal issues if you are caught downloading or distributing pirated content, which is illegal in most countries. The precautions of this method are that you should always scan the files before downloading them, use a VPN or a proxy to hide your IP address, and backup your data before installing anything on your console.

    -

    How to install and play GTA 5 for Xbox 360 for free?

    -

    Now that you know how to get GTA 5 for Xbox 360 for free, let's see how to install and play it on your console. Depending on which method you chose, the steps might vary slightly, but here are the general guidelines:

    -

    The requirements and steps for installing the game

    -

    The requirements for installing the game are:

    - -

    The steps for installing the game are:

    -
      -
    1. Download the game files from Xbox Live or from an online source to your computer.
    2. Extract the files using a program like WinRAR or 7-Zip if they are compressed.
    3. Copy the files to your USB drive or external hard drive using a program like XBOX ISO Extractor or Horizon.
    4. Plug your USB drive or external hard drive into your console.
    5. Launch the game from your modded dashboard or flash drive (if you chose the unofficial way) or from your normal dashboard (if you chose the official way).
    -

    Congratulations, you have successfully installed GTA 5 for Xbox 360 for free. Now you can enjoy the game and have fun.

    -

    The tips and tricks for playing the game

    -

    Playing GTA 5 for Xbox 360 for free is not much different from playing the paid version. However, there are some tips and tricks that can help you get the most out of the game and avoid some common problems. Here are some of them:

    - -

    Conclusion

    -

    GTA 5 for Xbox 360 is one of the best games ever made. It offers a rich and immersive open world experience that will keep you entertained for hours. Whether you follow the story mode or explore the online mode, you will find something to suit your taste and style. And the best part is that you can play it for free by following the methods we explained in this article.

    -

    Summary of the main points

    -

    In this article, we have covered:

    - What GTA 5 is and why it is so popular
    - The official and the unofficial ways to get GTA 5 for Xbox 360 for free
    - How to install the game and start playing it on your console
    - Some tips and tricks for playing the game and avoiding common problems

    Call to action and final thoughts

    -

    If you are ready to play GTA 5 for Xbox 360 for free, don't wait any longer. Download the game today and start your adventure in Los Santos. You won't regret it.

    -

    We hope you enjoyed this article and found it useful. If you did, please share it with your friends and leave us a comment below. We would love to hear your feedback and suggestions. And if you have any questions or problems regarding GTA 5 for Xbox 360 free download, feel free to ask us. We will try our best to help you.

    -

    Frequently Asked Questions

    -

    Here are some of the most common questions that people ask about GTA 5 for Xbox 360 free download:

    -

    Q: Is GTA 5 for Xbox 360 still playable in 2023?

    -

    A: Yes, GTA 5 for Xbox 360 is still playable in 2023. However, some features might not work properly or be discontinued due to updates or changes in the online services. For example, some online modes or events might not be available or have fewer players. Also, some DLCs or updates might not be compatible with the Xbox 360 version of the game.

    -

    Q: Can I play GTA 5 for Xbox 360 on Xbox One or Xbox Series X/S?

    -

    A: Yes, you can play GTA 5 for Xbox 360 on Xbox One or Xbox Series X/S using the backward compatibility feature. However, you will need to have a physical disc of the game or a digital copy downloaded from Xbox Live. You will also need to download an update that will optimize the game for your console. You will not be able to play GTA 5 for Xbox 360 on Xbox One or Xbox Series X/S using a modded console or a flash drive.

    -

    Q: Can I transfer my GTA 5 progress from Xbox 360 to another console or platform?

    -

    A: Yes, you can transfer your GTA 5 progress from Xbox 360 to another console or platform using the Rockstar Social Club service. However, you will need to have a valid and linked account on both platforms, and you will only be able to transfer your online progress, not your story progress. You will also need to do this before March 6, 2023, as Rockstar will stop supporting this feature after that date.

    -

    Q: How can I avoid getting banned from online services when playing GTA 5 for Xbox 360 for free?

    -

    A: There is no guarantee that you can avoid getting banned from online services when playing GTA 5 for Xbox 360 for free, especially if you use the unofficial way. However, there are some precautions that you can take to reduce the chances of getting detected or reported. Some of them are:

    - -

    If you follow these tips, you might be able to play GTA 5 for Xbox 360 for free without getting banned. However, you should always be aware of the risks and consequences of doing so.

    -

    Q: What are some of the best mods and cheats for GTA 5 for Xbox 360?

    -

    A: There are many mods and cheats for GTA 5 for Xbox 360 that can enhance your gameplay experience and add more fun and variety to the game. However, some of them might not work properly or be compatible with your console or the online services. Therefore, you should always check the reviews and ratings of the mods and cheats before downloading and installing them. Some of the best mods and cheats for GTA 5 for Xbox 360 are:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Stickman Shinobi Mod Menu and Experience the Fun of Ninja Fighting.md b/spaces/1phancelerku/anime-remove-background/Download Stickman Shinobi Mod Menu and Experience the Fun of Ninja Fighting.md deleted file mode 100644 index 9c9eaefedf338dfa6b0540304071229a1afd9cb7..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Stickman Shinobi Mod Menu and Experience the Fun of Ninja Fighting.md +++ /dev/null @@ -1,147 +0,0 @@ - -

    How to Download Stickman Shinobi Mod Menu and Enjoy Unlimited Features

    -

    Do you love stickman games, ninja legends, martial arts, and nonstop combats? If yes, then you should definitely try Stickman Shinobi, a new arcade game that will take you to a whole new world of elite ninjas and dangerous villains. In this game, you can choose from a large variety of powerful ninjas and warriors, each with their own unique styles of fighting, assassination, and ultimate skills. You can also explore different maps, levels, and bosses, as well as join tournaments and compete with other players.

    -

    But what if you want to have more fun and excitement in this game? What if you want to unlock all the characters, get unlimited money, gems, and tickets, and access a mega menu with more options and features? Well, there is a way to do that, and it is called Stickman Shinobi Mod Menu. In this article, we will show you how to download this mod menu, how to install it, how to use it, and what are the benefits of using it. We will also give you some tips and tricks to master this game and win every battle. So, let's get started!

    -

    download stickman shinobi mod menu


    Download Filehttps://jinyurl.com/2uNP2m



    -

    What is Stickman Shinobi and Why You Should Play It

    -

    Stickman Shinobi is a fighting arcade game published by NAGOO STUDIO. It is inspired by the popular manga and anime series Naruto, which features ninjas with supernatural abilities. The game has over 10 million downloads on Google Play Store and has received positive reviews from players. Here are some of the reasons why you should play this game:

    -

    The Game Features

    -

    Stickman Shinobi has many features that make it an enjoyable and immersive game. Some of these features are:

    - A large roster of powerful ninjas and warriors, each with their own unique styles of fighting, assassination, and ultimate skills
    - Many different maps, levels, and bosses to fight through
    - Tournaments where you can compete with other players
    - Fast-paced, nonstop combat inspired by the Naruto series

    The Game Modes

    -

    Stickman Shinobi has two main game modes that you can choose from:

    - -

    The Game Characters

    -

    Stickman Shinobi has over 100 legendary ninjas that you can play with or fight against. Each character has their own unique skills, stats, appearance, voice effects, and personality. Some of the characters are:

    Name | Skills | Village
         | Rinnegan, Mangekyo Sharingan, Wood Style, Limbo, etc. | Konoha (Hidden Leaf Village)
    Jiraiya | Rasengan, Toad Summoning, Sage Mode, Fire Style, etc. | Konoha (Hidden Leaf Village)
    Orochimaru | Snake Summoning, Immortality, Body Transfer, Curse Mark, etc. | Konoha (Hidden Leaf Village)
    Hinata | Byakugan, Gentle Fist, Twin Lion Fists, Protection of the Eight Trigrams Sixty-Four Palms, etc. | Konoha (Hidden Leaf Village)

    And many more! You can unlock all of them by playing the game or by using the mod menu that we will talk about later.

    What is Stickman Shinobi Mod Menu and What are the Benefits

    -

    If you want to have more fun and excitement in Stickman Shinobi, you might want to try the mod menu that we have prepared for you. This mod menu is a modified version of the game that gives you access to many features and options that are not available in the original game. Some of the benefits of using this mod menu are:

    The Mod Features

    -

    The mod menu has many features that will make your gameplay easier and more enjoyable. Some of these features are:

    - Unlimited money: You can get as much money as you want and use it to buy items, weapons, power-ups, etc.
    - Unlimited gems: You can get as many gems as you want and use them to unlock new characters and upgrade their skills.
    - Unlimited tickets: You can get as many tickets as you want and use them to enter tournaments and win rewards.
    - All characters unlocked: You can play with any character you want without having to unlock them first.
    - Mega menu: You can access a mega menu that gives you more options and features, such as changing the game speed, skipping levels, enabling god mode, etc.
    - No ads: You can enjoy the game without any annoying ads or pop-ups.
    - No root required: You can install and use the mod menu without rooting your device or risking any damage.
    - Easy to use: You can easily activate and deactivate the mod features with a simple tap on the screen.
    - Safe and secure: You can use the mod menu without worrying about any viruses or malware. The mod menu is tested and verified by our team of experts.
    - Free to download: You can download the mod menu for free from our website. We do not charge any fees or subscriptions for our service.

    The Mod Installation

    -

    The mod installation is very easy and simple. All you need to do is follow these steps:

    1. Delete the original game from your device if you have it installed.
    2. Download the mod menu APK file from our website. You can find the download link at the end of this article.
    3. Enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store.
    4. Install the mod menu APK file on your device by tapping on it and following the instructions.
    5. Launch the game and enjoy the mod menu features.

    The Mod Usage

    -

    The mod usage is also very easy and simple. All you need to do is follow these steps:

    -
      -
    1. Open the game and tap on the mod menu icon on the top left corner of the screen.
    2. Select the mod features that you want to activate or deactivate. You can also access the mega menu by tapping on the M button.
    3. Play the game and have fun with the mod menu features.
    -

    Tips and Tricks to Master Stickman Shinobi and Win Every Battle

    -

    Now that you know how to download and use the mod menu, you might want to learn some tips and tricks to improve your skills and performance in Stickman Shinobi. Here are some of them:

    -

    Choose the Right Character for Your Style

    -

    One of the most important things in Stickman Shinobi is to choose the right character for your style of playing. Each character has their own advantages and disadvantages, as well as different abilities and stats. You should try to find a character that suits your preferences and goals. For example, if you like fast and agile fighters, you might want to choose Naruto, Sasuke, or Hinata. If you like strong and durable fighters, you might want to choose Gaara, Madara, or Jiraiya. If you like versatile and balanced fighters, you might want to choose Kakashi, Itachi, or Sakura.

    -


    -

    Use the Ultimate Skills Wisely

    -

    Another important thing in Stickman Shinobi is to use the ultimate skills wisely. Each character has their own ultimate skill that can deal massive damage and change the course of the battle. However, these skills have a cooldown time and require a certain amount of chakra to use. You should try to use them at the right moment and not waste them unnecessarily. For example, you might want to use them when you are facing a tough boss or a strong opponent, when you are outnumbered or surrounded, or when you need a finishing blow or a comeback.

    -

    Upgrade Your Strength and Enhance Your Abilities

    -

    A third important thing in Stickman Shinobi is to upgrade your strength and enhance your abilities. As you play the game, you will earn money, gems, tickets, and other rewards that you can use to buy items, weapons, power-ups, etc. You can also use them to upgrade your character's stats, such as health, attack, defense, speed, etc. You should try to spend your resources wisely and invest in the things that will help you improve your performance and overcome the challenges. For example, you might want to buy a sword that increases your attack power, a shield that increases your defense power, or a potion that restores your health.

    -

    Learn from the Bosses and the Tournaments

    -

    A fourth important thing in Stickman Shinobi is to learn from the bosses and the tournaments. As you play the game, you will encounter many bosses and opponents that will test your skills and abilities. You should try to learn from them and see what they do well and what they do poorly. You should also try to copy their moves and strategies and apply them to your own gameplay. For example, you might want to learn how to dodge their attacks, how to counter their skills, how to exploit their weaknesses, etc.

    -

    Conclusion and FAQs

    -

    In conclusion, Stickman Shinobi is a fun and exciting game that will keep you entertained for hours. You can enjoy playing with many legendary ninjas from Naruto, exploring different maps and levels, joining tournaments and competing with other players, etc. You can also download our mod menu that will give you access to many features and options that will make your gameplay easier and more enjoyable. You can get unlimited money, gems, tickets, unlock all characters, access a mega menu, etc. All you need to do is follow our instructions on how to download, install, and use the mod menu.

    -

    We hope that this article has helped you understand more about Stickman Shinobi and our mod menu. If you have any questions or feedback about this topic, please feel free to contact us through our website or email. We will be happy to assist you with anything related to this game or our service.

    -

    Here are some FAQs that might answer some of your queries:

    1. Q: Is Stickman Shinobi Mod Menu safe to use?
       A: Yes, it is safe to use. Our mod menu is tested and verified by our team of experts. It does not contain any viruses or malware that can harm your device or data. It also does not require root access or any special permissions to use.
    2. Q: How can I update Stickman Shinobi Mod Menu?
       A: You can update the mod menu by visiting our website and downloading the latest version of the mod menu APK file. You can also check our website regularly for any news or updates about the game or the mod menu.
    3. Q: How can I uninstall Stickman Shinobi Mod Menu?
       A: You can uninstall the mod menu by deleting the mod menu APK file from your device. You can also reinstall the original game from Google Play Store if you want to play it without the mod menu.
    4. Q: Can I use Stickman Shinobi Mod Menu with other mods or cheats?
       A: We do not recommend using the mod menu with other mods or cheats, as they might cause conflicts or errors in the game. Our mod menu already provides you with everything you need to enjoy the game to the fullest.
    5. Q: Can I get banned or reported for using Stickman Shinobi Mod Menu?
       A: We do not guarantee that you will not get banned or reported for using the mod menu, as it depends on the game developers and their policies. However, we have not received any reports of such cases so far, and we try our best to make the mod menu undetectable and safe to use. We also advise you to use the mod menu responsibly and not abuse it or ruin the game experience for other players.

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download WA GB APK Pro v17.40 - The Best WhatsApp Mod for Android [2023].md b/spaces/1phancelerku/anime-remove-background/Download WA GB APK Pro v17.40 - The Best WhatsApp Mod for Android [2023].md deleted file mode 100644 index b6891028c49ab5feaf90601693550019b18f43a4..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download WA GB APK Pro v17.40 - The Best WhatsApp Mod for Android [2023].md +++ /dev/null @@ -1,101 +0,0 @@ -
    -

    Download WA GB APK Pro: A Modified Version of WhatsApp with More Features and Customization

    -

    If you are looking for a way to enhance your WhatsApp experience, you might want to try WA GB APK Pro. This is a modified version of the popular instant messaging app that offers a host of additional features and customization options that are not available in the standard version of the app. In this article, we will explore what WA GB APK Pro is, its key features, how to download and install it on your Android device, and its pros and cons.

    -

    download wa gb apk pro


    Download Ziphttps://jinyurl.com/2uNNWR



    -

    Key Features of WA GB APK Pro

    -

    WA GB APK Pro offers a range of features that are not available in the standard version of WhatsApp. Some of the key features of the app include:

    - Privacy controls that go beyond the standard app
    - Customization options to change the appearance of the app
    - The ability to send larger files than the standard app allows
    - An anti-ban feature that helps you avoid getting banned for using a modified app

    How to Download and Install WA GB APK Pro on Android

    -

    If you want to try out WA GB APK Pro on your Android device, you need to follow these steps:

    1. Step 1: Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    2. Step 2: Download the latest version of WA GB APK Pro from a trusted source.
    3. Step 3: Install the app and verify your phone number. To do this, open the downloaded file and follow the instructions on the screen (a command-line alternative using adb is sketched after this list). You will need to enter your phone number and verify it with an OTP code.
    4. Step 4: Enjoy the app and its features. You can now use WA GB APK Pro to chat with your friends and family, and enjoy the extra features and customization options that the app offers.
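
    If you prefer to install from a computer, here is a minimal sketch of the same install step using adb instead of the on-phone file manager. It assumes the Android platform-tools are installed on the computer, USB debugging is enabled on the phone, and the APK filename below is only an example.

    ```bash
    # Confirm the phone is connected and authorized for debugging
    adb devices

    # Sideload the downloaded APK (example filename; use the file you actually downloaded)
    adb install wa-gb-apk-pro.apk
    ```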

    Pros and Cons of WA GB APK Pro

    -

    WA GB APK Pro is not an official app, and therefore, it has some advantages and disadvantages that you should be aware of before using it. Here are some of the pros and cons of the app:

    | Character | Skills | Origin |
    | --- | --- | --- |
    | Naruto | Rasengan, Shadow Clone, Nine-Tails Chakra, Sage Mode, etc. | Konoha (Hidden Leaf Village) |
    | Sasuke | Chidori, Sharingan, Amaterasu, Susanoo, etc. | Konoha (Hidden Leaf Village) |
    | Sakura | Medical Ninjutsu, Super Strength, Healing Factor, etc. | Konoha (Hidden Leaf Village) |
    | Kakashi | Lightning Blade, Sharingan, Kamui, Raikiri, etc. | Konoha (Hidden Leaf Village) |
    | Gaara | Sand Manipulation, Shukaku Chakra, Sand Coffin, Sand Shield, etc. | Suna (Hidden Sand Village) |
    | Itachi | Sharingan, Mangekyo Sharingan, Tsukuyomi, Izanami, etc. | Konoha (Hidden Leaf Village) |
    | Madara | | |

    | Pros | Cons |
    | --- | --- |
    | More features and customization options than the standard app | Potential security and privacy risks from using a third-party app |
    | No need to root your device to use the app | Possible compatibility issues with some devices and Android versions |
    | Anti-ban feature to avoid getting banned for using a modified app | No official support or updates from the developers of WhatsApp |
    -

    Conclusion

    -

    WA GB APK Pro is a modified version of WhatsApp that offers more features and customization options than the standard version of the app. The app allows you to control your privacy, change the appearance of the app, send larger files, and avoid getting banned for using a third-party app. However, the app also has some drawbacks, such as potential security and privacy risks, compatibility issues, and lack of official support. Therefore, you should use the app at your own risk and discretion.

    -

    FAQs

    -

    Here are some of the frequently asked questions about WA GB APK Pro:

    -

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy the Best Truck Simulation Game with Truck Simulator Nusantara Mod APK Download.md b/spaces/1phancelerku/anime-remove-background/Enjoy the Best Truck Simulation Game with Truck Simulator Nusantara Mod APK Download.md deleted file mode 100644 index d66b6746f1ebbb6e822a813d1548f6745bbaf587..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Enjoy the Best Truck Simulation Game with Truck Simulator Nusantara Mod APK Download.md +++ /dev/null @@ -1,96 +0,0 @@ -
    -

    Download Truck Simulator Nusantara Mod Apk: The Ultimate Indonesian Truck Driving Game

    -

    If you are looking for a realistic and immersive truck driving simulation game that lets you experience the thrill of driving a truck on the Indonesian roads, then you should download Truck Simulator Nusantara Mod Apk. This is a free Android game developed by Truck ID that offers various features that make it stand out from other similar games. In this article, we will tell you what Truck Simulator Nusantara is, what are its features, how to download and install it, and some tips and tricks for playing it.

    -

    download truck simulator nusantara mod apk


    Download Filehttps://jinyurl.com/2uNN57



    -

    Features of Truck Simulator Nusantara Mod Apk

    -

    Truck Simulator Nusantara Mod Apk is not just a simple driving game. It is a comprehensive trucking simulation game that lets you customize your truck, manage your office, bid for jobs, deliver goods, compete with other players online, and more. Here are some of the features that you can enjoy in this game:

    - Truck customization with new trucks, parts, and accessories
    - Office management, including staff, equipment, and buildings
    - Jobs you can bid for and goods to deliver on Indonesian roads
    - Online multiplayer with events and tournaments against other players

    How to Download and Install Truck Simulator Nusantara Mod Apk

    -

    If you want to download Truck Simulator Nusantara Mod Apk and enjoy unlimited money and unlocked features in the game, you need to follow these steps:

    1. Step 1: Download the mod apk file from a trusted source. You can use the link below to download it directly from our website.
    2. Step 2: Enable unknown sources on your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    3. Step 3: Install the mod apk file and launch the game. To do this, go to your file manager and locate the downloaded file. Tap on it and follow the installation instructions. Once the installation is complete, open the game and enjoy.
    4. Step 4: Enjoy unlimited money and unlocked features in the game. You can use the money to buy new trucks, parts, accessories, office equipment, staff, buildings, etc. You can also use the unlocked features to customize your truck, play online multiplayer, join events and tournaments, etc.

    Tips and Tricks for Playing Truck Simulator Nusantara Mod Apk

    -

    If you want to master Truck Simulator Nusantara Mod Apk and become a successful truck driver and business owner, you need to follow these tips and tricks:

    - -

    Conclusion

    -

    Truck Simulator Nusantara Mod Apk is a fun and addictive game that lets you experience the life of a truck driver and a business owner in Indonesia. You can customize your truck, manage your office, bid for jobs, deliver goods, compete with other players online, and more. You can also enjoy unlimited money and unlocked features in the game by downloading the mod apk file from our website. If you are looking for a realistic and immersive truck driving simulation game, then you should download Truck Simulator Nusantara Mod Apk today.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Truck Simulator Nusantara Mod Apk:


    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Explore the World with Google Earth Download Now for Free.md b/spaces/1phancelerku/anime-remove-background/Explore the World with Google Earth Download Now for Free.md deleted file mode 100644 index 52824f594aa2a9f92d116cce1cb4ebf03b8db378..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Explore the World with Google Earth Download Now for Free.md +++ /dev/null @@ -1,162 +0,0 @@ - -

    Google Earth Download: How to Explore the World from Your Computer

    -

    Have you ever wanted to see the world from a different perspective? To travel to any place on the planet and view it in stunning detail? To discover new places and learn more about them? If so, you might want to download Google Earth, a program that lets you explore the globe with a swipe of your finger or a click of your mouse.

    -

    google earth download


    DOWNLOADhttps://jinyurl.com/2uNUFO



    -

    In this article, we will show you how to download Google Earth for free, how to use its amazing features, what are the system requirements for running it, and what are some alternatives to Google Earth that you might want to try. Let's get started!

    -

    What Is Google Earth and Why Should You Download It?

    -

    Google Earth is a computer program that creates a 3D model of the earth based on satellite imagery, aerial photography, GIS data, and other sources. It allows you to zoom in and out, rotate, tilt, and pan the globe, and see any place in high resolution. You can also access Street View, which shows you 360-degree photos of streets, buildings, and landmarks. You can even dive into the ocean and see underwater features and marine life.

    -

    Google Earth is more than just a map. It is also a powerful tool for learning, exploring, and discovering. You can use it to find out more about the geography, history, culture, and environment of any location. You can also use it to measure distances, areas, and elevations, create custom maps and tours, import and export data, and much more.

    -

    Downloading Google Earth is free and easy. You can choose from different versions depending on your device and preferences. Here are the options:

    -

    How to Download Google Earth for Free

    -

    Google Earth Web Version

    -

    The easiest way to use Google Earth is through your web browser. You don't need to install anything on your computer. Just go to https://www.google.com/intl/en_in/earth/ and start exploring. You can also access Google Earth from Google Maps by clicking on the satellite view icon and then on the globe icon.

    -

    The web version of Google Earth works best with Chrome, Firefox, Edge, or Opera browsers. It has most of the features of the desktop version, except for some advanced ones like historical imagery, time slider, KML import/export, etc.

    -

    Google Earth Pro Desktop Version

    -

    If you want more functionality and control over your Google Earth experience, you might want to download the desktop version of Google Earth Pro. This version is also free and works on Windows, Mac, or Linux computers. You can download it from https://www.google.com/earth/versions/.

    -

    The desktop version of Google Earth Pro has some advantages over the web version. For example, you can access historical imagery and see how places have changed over time. You can also import and export GIS data in various formats. You can print high-resolution screenshots and make offline movies. You can also use advanced measurement tools and drawing tools.
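
    As a rough illustration of the kind of data Google Earth Pro can import and export, a minimal KML file with a single placemark might look like the sketch below. The placemark name and coordinates are only examples, not values required by Google Earth.

    ```xml
    <?xml version="1.0" encoding="UTF-8"?>
    <kml xmlns="http://www.opengis.net/kml/2.2">
      <Placemark>
        <!-- Example placemark; coordinates are longitude,latitude,altitude -->
        <name>Example placemark</name>
        <Point>
          <coordinates>77.2090,28.6139,0</coordinates>
        </Point>
      </Placemark>
    </kml>
    ```

    Opening a file like this in Google Earth Pro (for example via File > Open) should place a pin at the given coordinates.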

    -

    Google Earth Mobile Version

    -

    If you want to use Google Earth on your smartphone or tablet, you can download the mobile version from the App Store or Google Play Store. The mobile version lets you browse the globe with a swipe of your finger or a tilt of your device. You can also use voice commands to search for places, ask questions, and get directions. You can also use the Voyager feature to explore curated stories and tours from around the world.

    -


    The mobile version of Google Earth has some limitations compared to the web and desktop versions. For example, you can't access historical imagery, 3D buildings, or Street View. You also can't import or export data, create custom maps, or print screenshots.

    -

    How to Use Google Earth Features

    -

    Google Earth has many features that make it more than just a map. Here are some of the most popular and useful ones:

    -

    Imagery

    -

    Google Earth uses satellite imagery, aerial photography, and GIS data to create a realistic and detailed representation of the earth. You can zoom in and out, rotate, tilt, and pan the globe, and see any place in high resolution. You can also change the angle and perspective of your view, and see the terrain and elevation of any location.

    -

    Google Earth also lets you access historical imagery and see how places have changed over time. You can use the time slider to go back in time and compare different dates. You can also see the current weather conditions and cloud cover of any place.

    -

    3D Objects and Imagery

    -

    Google Earth also has 3D objects and imagery that make the map more realistic and immersive. You can see 3D buildings, landmarks, monuments, bridges, and other structures in many cities around the world. You can also see 3D trees, plants, animals, and other natural features in some places.

    -

    To enable 3D objects and imagery, you need to use the web or desktop version of Google Earth, and have a compatible device and browser. You can turn on or off 3D objects and imagery by clicking on the menu icon and selecting 3D Buildings or 3D Trees.

    -

    Street View

    -

    Street View is a feature that lets you see 360-degree photos of streets, buildings, and landmarks. You can use Street View to explore places as if you were there, and see what they look like from different angles. You can also use Street View to find businesses, services, attractions, and other points of interest.

    -

    To access Street View, you need to use the web or desktop version of Google Earth, or the mobile version on Android devices. You can enter Street View by dragging the Pegman icon to any place that has a blue line or dot. You can exit Street View by clicking on the back arrow or the X icon.

    -

    Water and Ocean

    -

    Google Earth also lets you explore the water and ocean features of the earth. You can see the surface of the water, including waves, ripples, reflections, and colors. You can also dive into the ocean and see underwater features like coral reefs, shipwrecks, volcanoes, trenches, and marine life.

    -

    To dive into the ocean, you need to use the web or desktop version of Google Earth. You can click on any place that has water or ocean, or use the search box to find a specific location. You can also use the Ocean layer to see different categories of ocean features.

    -

    Other Interesting Features

    -

    Google Earth has many other interesting features that you can use to enhance your experience. Here are some examples:

    - -

    What Are the System Requirements for Google Earth?

    -

    Google Earth is a powerful program, and your computer needs to meet certain system requirements for it to run smoothly. Here are the minimum and recommended system requirements for Google Earth:

    | System Component | Minimum Requirement | Recommended Requirement |
    | --- | --- | --- |
    | Operating System | Windows 7 or higher; Mac OS 10.8 or higher; Linux: LSB 4.1 (Linux Standard Base) libraries | Windows 10 or higher; Mac OS 10.12 or higher; Linux: LSB 5.0 libraries |
    | CPU | Pentium 4 2.4GHz+ or AMD 2400xp+ | Dual Core 2.0GHz+ or AMD X2 2.0GHz+ |
    | RAM | 512 MB | 4 GB |
    | Disk Space | 2 GB free space | 4 GB free space |
    | Network Speed | 768 Kbits/sec | DSL/Cable (at least 1 Mbits/sec) |
    | Graphics Card | DirectX 9 and Shader Model 2.0 compatible card with 64 MB of VRAM; OpenGL 2.0 compatible card with 64 MB of VRAM (for Linux) | DirectX 11 and Shader Model 3.0 compatible card with 512 MB of VRAM; OpenGL 3.0 compatible card with 512 MB of VRAM (for Linux) |
    | Screen Resolution | 1024x768 pixels, "16-bit High Color" | "32-bit True Color" |

    Source: https://support.google.com/earth/answer/176180?hl=en&ref_topic=2376075
    -

    If you have any issues with running Google Earth, you can check the Google Earth Help Center at https://support.google.com/earth/answer/176180?hl=en&ref_topic=2376075 for troubleshooting tips and solutions.
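
    If you are unsure what OpenGL version your graphics card supports, one informal way to check on Linux is sketched below; it assumes the glxinfo tool (part of the mesa-utils package on many distributions) is installed. On Windows, the built-in dxdiag tool reports the installed DirectX version.

    ```bash
    # Print the OpenGL version reported by the graphics driver (Linux, requires glxinfo)
    glxinfo | grep "OpenGL version"
    ```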

    -

    What Are Some Alternatives to Google Earth?

    -

    Google Earth is not the only program that lets you explore the world from your computer. There are some alternatives that you might want to try if you are looking for different features, perspectives, or experiences. Here are some examples:

    -

    Zoom Earth

    -

    Zoom Earth is a website that lets you see near-real-time satellite images of the earth. You can zoom in and out, and see the weather, clouds, fires, storms, and other events happening around the world. You can also access historical imagery and see how places have changed over time.

    -

    You can visit Zoom Earth at https://zoom.earth/.

    -

    Marble

    -

    Marble is a desktop program that lets you see the earth as a 3D globe. You can rotate, tilt, and zoom the globe, and see different map views, such as political, physical, satellite, street, etc. You can also access various online services, such as Wikipedia, OpenStreetMap, Flickr, etc.

    -

    You can download Marble from https://marble.kde.org/.

    -

    Satellites.pro

    -

    Satellites.pro is a website that lets you see high-resolution satellite images of any place on earth. You can search for any address or coordinates, and see the details of buildings, roads, landscapes, etc. You can also compare different images from different dates and sources.

    -

    You can visit Satellites.pro at https://satellites.pro/.

    -

    NASA Worldview

    -

    NASA Worldview is a website that lets you see satellite images of the earth from NASA's Earth Observing System Data and Information System (EOSDIS). You can see the earth in different wavelengths, such as visible, infrared, water vapor, etc. You can also see various data layers, such as aerosols, fires, floods, snow and ice, etc.

    -

    You can visit NASA Worldview at https://worldview.earthdata.nasa.gov/.

    -

    ArcGIS Map Viewer

    -

    ArcGIS Map Viewer is a website that lets you create and share interactive maps of the earth. You can use various basemaps, such as topographic, satellite, street, etc. You can also add various layers of data, such as demographics, environment, health, etc. You can also customize your map with symbols, labels, pop-ups, etc.

    -

    You can visit ArcGIS Map Viewer at https://www.arcgis.com/home/webmap/viewer.html.

    -

    Conclusion

    -

    Google Earth is a program that lets you explore the world from your computer. You can download it for free and use it to see any place in high resolution, access Street View and historical imagery, dive into the ocean and see underwater features, learn more about the geography, history, culture, and environment of any location, and use various features and tools to enhance your experience. Google Earth is not the only program that lets you explore the world from your computer. There are some alternatives that you might want to try if you are looking for different features, perspectives, or experiences.

    We hope this article has helped you learn more about Google Earth and how to download it for free. If you have any questions or feedback, please let us know in the comments below. Happy exploring!

    FAQs

    -

    Here are some frequently asked questions about Google Earth and its download:

    1. Is Google Earth safe to download?
       Yes, Google Earth is safe to download from the official website or app store. It does not contain any viruses, malware, or spyware. However, you should always be careful when downloading any software from the internet and scan it with a reliable antivirus program before installing it.
    2. How often is Google Earth updated?
       Google Earth is updated regularly with new imagery, data, and features. The frequency of updates depends on various factors, such as the availability of satellite images, the quality of the images, the processing time, etc. Generally, Google Earth updates its imagery every one to three years.
    3. How accurate is Google Earth?
       Google Earth is accurate in terms of the location and representation of places on the globe. However, it is not a perfect representation of reality. There may be some errors, distortions, or outdated information due to the limitations of satellite imagery, aerial photography, GIS data, and other sources. You should always verify the information on Google Earth with other sources before using it for any serious purpose.
    4. Can I use Google Earth offline?
       Yes, you can use Google Earth offline if you have downloaded the desktop version of Google Earth Pro. You can save areas of interest on your computer and view them later without an internet connection. You can also create offline movies and print screenshots. However, you will not be able to access some features that require online services, such as Street View, historical imagery, Voyager, etc.
    5. Can I use Google Earth for commercial purposes?
       Yes, you can use Google Earth for commercial purposes if you have obtained a license from Google. You can apply for a license at https://www.google.com/permissions/geoguidelines/. You will need to follow the terms and conditions of the license agreement and respect the intellectual property rights of Google and its partners.

    \ No newline at end of file diff --git a/spaces/801artistry/RVC801/infer/modules/train/train.py b/spaces/801artistry/RVC801/infer/modules/train/train.py deleted file mode 100644 index 550bef391444c9b6c0d8c44ae3a3809b3ade4218..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/modules/train/train.py +++ /dev/null @@ -1,723 +0,0 @@ -import os -import sys -import logging - -logger = logging.getLogger(__name__) - -now_dir = os.getcwd() -sys.path.append(os.path.join(now_dir)) - -import datetime - -from infer.lib.train import utils - -hps = utils.get_hparams() -os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",") -n_gpus = len(hps.gpus.split("-")) -from random import randint, shuffle - -import torch -try: - import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import - if torch.xpu.is_available(): - from infer.modules.ipex import ipex_init - from infer.modules.ipex.gradscaler import gradscaler_init - from torch.xpu.amp import autocast - GradScaler = gradscaler_init() - ipex_init() - else: - from torch.cuda.amp import GradScaler, autocast -except Exception: - from torch.cuda.amp import GradScaler, autocast - -torch.backends.cudnn.deterministic = False -torch.backends.cudnn.benchmark = False -from time import sleep -from time import time as ttime - -import torch.distributed as dist -import torch.multiprocessing as mp - -from torch.nn import functional as F -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter - -from infer.lib.infer_pack import commons -from infer.lib.train.data_utils import ( - DistributedBucketSampler, - TextAudioCollate, - TextAudioCollateMultiNSFsid, - TextAudioLoader, - TextAudioLoaderMultiNSFsid, -) - -if hps.version == "v1": - from infer.lib.infer_pack.models import MultiPeriodDiscriminator - from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid as RVC_Model_f0 - from infer.lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0, - ) -else: - from infer.lib.infer_pack.models import ( - SynthesizerTrnMs768NSFsid as RVC_Model_f0, - SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0, - MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator, - ) - -from infer.lib.train.losses import ( - discriminator_loss, - feature_loss, - generator_loss, - kl_loss, -) -from infer.lib.train.mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from infer.lib.train.process_ckpt import savee - -global_step = 0 -import csv - -class EpochRecorder: - def __init__(self): - self.last_time = ttime() - - def record(self): - now_time = ttime() - elapsed_time = now_time - self.last_time - self.last_time = now_time - elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time)) - current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - return f"[{current_time}] | ({elapsed_time_str})" - -def reset_stop_flag(): - with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: - csv_writer = csv.writer(STOPCSVwrite, delimiter=",") - csv_writer.writerow(["False"]) - -def create_model(hps, model_f0, model_nof0): - filter_length_adjusted = hps.data.filter_length // 2 + 1 - segment_size_adjusted = hps.train.segment_size // hps.data.hop_length - is_half = hps.train.fp16_run - sr = hps.sample_rate - - model = model_f0 if hps.if_f0 == 1 else model_nof0 - - return model( - filter_length_adjusted, - segment_size_adjusted, - **hps.model, - is_half=is_half, - sr=sr - ) - -def 
move_model_to_cuda_if_available(model, rank): - if torch.cuda.is_available(): - return model.cuda(rank) - else: - return model - -def create_optimizer(model, hps): - return torch.optim.AdamW( - model.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - -def create_ddp_model(model, rank): - if torch.cuda.is_available(): - return DDP(model, device_ids=[rank]) - else: - return DDP(model) - -def create_dataset(hps, if_f0=True): - return TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) if if_f0 else TextAudioLoader(hps.data.training_files, hps.data) - -def create_sampler(dataset, batch_size, n_gpus, rank): - return DistributedBucketSampler( - dataset, - batch_size * n_gpus, - # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s - [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - -def set_collate_fn(if_f0=True): - return TextAudioCollateMultiNSFsid() if if_f0 else TextAudioCollate() - - -def main(): - n_gpus = torch.cuda.device_count() - - if torch.cuda.is_available() == False and torch.backends.mps.is_available() == True: - n_gpus = 1 - if n_gpus < 1: - # patch to unblock people without gpus. there is probably a better way. - logger.warn("NO GPU DETECTED: falling back to CPU - this may take a while") - n_gpus = 1 - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = str(randint(20000, 55555)) - children = [] - for i in range(n_gpus): - subproc = mp.Process( - target=run, - args=( - i, - n_gpus, - hps, - ), - ) - children.append(subproc) - subproc.start() - - for i in range(n_gpus): - children[i].join() - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - # utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group( - backend="gloo", init_method="env://", world_size=n_gpus, rank=rank - ) - torch.manual_seed(hps.train.seed) - if torch.cuda.is_available(): - torch.cuda.set_device(rank) - - if hps.if_f0 == 1: - train_dataset = TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) - else: - train_dataset = TextAudioLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size * n_gpus, - # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s - [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - # It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. 
- # num_workers=8 -> num_workers=4 - if hps.if_f0 == 1: - collate_fn = TextAudioCollateMultiNSFsid() - else: - collate_fn = TextAudioCollate() - train_loader = DataLoader( - train_dataset, - num_workers=4, - shuffle=False, - pin_memory=True, - collate_fn=collate_fn, - batch_sampler=train_sampler, - persistent_workers=True, - prefetch_factor=8, - ) - if hps.if_f0 == 1: - net_g = RVC_Model_f0( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model, - is_half=hps.train.fp16_run, - sr=hps.sample_rate, - ) - else: - net_g = RVC_Model_nof0( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model, - is_half=hps.train.fp16_run, - ) - if torch.cuda.is_available(): - net_g = net_g.cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm) - if torch.cuda.is_available(): - net_d = net_d.cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if hasattr(torch, "xpu") and torch.xpu.is_available(): - pass - elif torch.cuda.is_available(): - net_g = DDP(net_g, device_ids=[rank]) - net_d = DDP(net_d, device_ids=[rank]) - else: - net_g = DDP(net_g) - net_d = DDP(net_d) - - try: # 如果能加载自动resume - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d - ) # D多半加载没事 - if rank == 0: - logger.info("loaded D") - # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g - ) - global_step = (epoch_str - 1) * len(train_loader) - # epoch_str = 1 - # global_step = 0 - except: # 如果首次不能加载,加载pretrain - # traceback.print_exc() - epoch_str = 1 - global_step = 0 - if hps.pretrainG != "": - if rank == 0: - logger.info("loaded pretrained %s" % (hps.pretrainG)) - if hasattr(net_g, "module"): - logger.info( - net_g.module.load_state_dict( - torch.load(hps.pretrainG, map_location="cpu")["model"] - ) - ) ##测试不加载优化器 - else: - logger.info( - net_g.load_state_dict( - torch.load(hps.pretrainG, map_location="cpu")["model"] - ) - ) ##测试不加载优化器 - if hps.pretrainD != "": - if rank == 0: - logger.info("loaded pretrained %s" % (hps.pretrainD)) - if hasattr(net_d, "module"): - logger.info( - net_d.module.load_state_dict( - torch.load(hps.pretrainD, map_location="cpu")["model"] - ) - ) - else: - logger.info( - net_d.load_state_dict( - torch.load(hps.pretrainD, map_location="cpu")["model"] - ) - ) - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR( - optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR( - optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - cache = [] - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - logger, - [writer, writer_eval], - cache, - ) - else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - 
[optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - None, - None, - cache, - ) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate( - rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, cache -): - net_g, net_d = nets - optim_g, optim_d = optims - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - - # Prepare data iterator - if hps.if_cache_data_in_gpu == True: - # Use Cache - data_iterator = cache - if cache == []: - # Make new cache - for batch_idx, info in enumerate(train_loader): - # Unpack - if hps.if_f0 == 1: - ( - phone, - phone_lengths, - pitch, - pitchf, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ) = info - else: - ( - phone, - phone_lengths, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ) = info - # Load on CUDA - if torch.cuda.is_available(): - phone = phone.cuda(rank, non_blocking=True) - phone_lengths = phone_lengths.cuda(rank, non_blocking=True) - if hps.if_f0 == 1: - pitch = pitch.cuda(rank, non_blocking=True) - pitchf = pitchf.cuda(rank, non_blocking=True) - sid = sid.cuda(rank, non_blocking=True) - spec = spec.cuda(rank, non_blocking=True) - spec_lengths = spec_lengths.cuda(rank, non_blocking=True) - wave = wave.cuda(rank, non_blocking=True) - wave_lengths = wave_lengths.cuda(rank, non_blocking=True) - # Cache on list - if hps.if_f0 == 1: - cache.append( - ( - batch_idx, - ( - phone, - phone_lengths, - pitch, - pitchf, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ), - ) - ) - else: - cache.append( - ( - batch_idx, - ( - phone, - phone_lengths, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ), - ) - ) - else: - # Load shuffled cache - shuffle(cache) - else: - # Loader - data_iterator = enumerate(train_loader) - - # Run steps - epoch_recorder = EpochRecorder() - for batch_idx, info in data_iterator: - # Data - ## Unpack - if hps.if_f0 == 1: - ( - phone, - phone_lengths, - pitch, - pitchf, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ) = info - else: - phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info - ## Load on CUDA - if (hps.if_cache_data_in_gpu == False) and torch.cuda.is_available(): - phone = phone.cuda(rank, non_blocking=True) - phone_lengths = phone_lengths.cuda(rank, non_blocking=True) - if hps.if_f0 == 1: - pitch = pitch.cuda(rank, non_blocking=True) - pitchf = pitchf.cuda(rank, non_blocking=True) - sid = sid.cuda(rank, non_blocking=True) - spec = spec.cuda(rank, non_blocking=True) - spec_lengths = spec_lengths.cuda(rank, non_blocking=True) - wave = wave.cuda(rank, non_blocking=True) - # wave_lengths = wave_lengths.cuda(rank, non_blocking=True) - - # Calculate - with autocast(enabled=hps.train.fp16_run): - if hps.if_f0 == 1: - ( - y_hat, - ids_slice, - x_mask, - z_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - ) = net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid) - else: - ( - y_hat, - ids_slice, - x_mask, - z_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - ) = net_g(phone, phone_lengths, spec, spec_lengths, sid) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - y_mel = commons.slice_segments( - mel, ids_slice, hps.train.segment_size // hps.data.hop_length - ) - with autocast(enabled=False): - y_hat_mel = mel_spectrogram_torch( - 
y_hat.float().squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - if hps.train.fp16_run == True: - y_hat_mel = y_hat_mel.half() - wave = commons.slice_segments( - wave, ids_slice * hps.data.hop_length, hps.train.segment_size - ) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( - y_d_hat_r, y_d_hat_g - ) - optim_d.zero_grad() - scaler.scale(loss_disc).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]["lr"] - logger.info( - "Train Epoch: {} [{:.0f}%]".format( - epoch, 100.0 * batch_idx / len(train_loader) - ) - ) - # Amor For Tensorboard display - if loss_mel > 75: - loss_mel = 75 - if loss_kl > 9: - loss_kl = 9 - - logger.info([global_step, lr]) - logger.info( - f"loss_disc={loss_disc:.3f}, loss_gen={loss_gen:.3f}, loss_fm={loss_fm:.3f},loss_mel={loss_mel:.3f}, loss_kl={loss_kl:.3f}" - ) - scalar_dict = { - "loss/g/total": loss_gen_all, - "loss/d/total": loss_disc, - "learning_rate": lr, - "grad_norm_d": grad_norm_d, - "grad_norm_g": grad_norm_g, - } - scalar_dict.update( - { - "loss/g/fm": loss_fm, - "loss/g/mel": loss_mel, - "loss/g/kl": loss_kl, - } - ) - - scalar_dict.update( - {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)} - ) - scalar_dict.update( - {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)} - ) - scalar_dict.update( - {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)} - ) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy( - y_mel[0].data.cpu().numpy() - ), - "slice/mel_gen": utils.plot_spectrogram_to_numpy( - y_hat_mel[0].data.cpu().numpy() - ), - "all/mel": utils.plot_spectrogram_to_numpy( - mel[0].data.cpu().numpy() - ), - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict, - ) - global_step += 1 - # /Run steps - - if epoch % hps.save_every_epoch == 0 and rank == 0: - if hps.if_latest == 0: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), - ) - else: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(2333333)), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(2333333)), - ) - if rank == 0 and 
hps.save_every_weights == "1": - if hasattr(net_g, "module"): - ckpt = net_g.module.state_dict() - else: - ckpt = net_g.state_dict() - logger.info( - "saving ckpt %s_e%s:%s" - % ( - hps.name, - epoch, - savee( - ckpt, - hps.sample_rate, - hps.if_f0, - hps.name + "_e%s_s%s" % (epoch, global_step), - epoch, - hps.version, - hps, - ), - ) - ) - - stopbtn = False - try: - with open("csvdb/stop.csv", 'r') as csv_file: - stopbtn_str = next(csv.reader(csv_file), [None])[0] - if stopbtn_str is not None: stopbtn = stopbtn_str.lower() == 'true' - except (ValueError, TypeError, FileNotFoundError, IndexError) as e: - print(f"Handling exception: {e}") - stopbtn = False - - if stopbtn: - logger.info("Stop Button was pressed. The program is closed.") - ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict() - logger.info( - "saving final ckpt:%s" - % ( - savee( - ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps - ) - ) - ) - sleep(1) - reset_stop_flag() - os._exit(2333333) - - if rank == 0: - logger.info("====> Epoch: {} {}".format(epoch, epoch_recorder.record())) - if epoch >= hps.total_epoch and rank == 0: - logger.info("Training is done. The program is closed.") - - if hasattr(net_g, "module"): - ckpt = net_g.module.state_dict() - else: - ckpt = net_g.state_dict() - logger.info( - "saving final ckpt:%s" - % ( - savee( - ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps - ) - ) - ) - sleep(1) - os._exit(2333333) - - -if __name__ == "__main__": - torch.multiprocessing.set_start_method("spawn") - main() diff --git a/spaces/ADOPLE/ResumeAnalyzer/style.css b/spaces/ADOPLE/ResumeAnalyzer/style.css deleted file mode 100644 index 706b9839f9d00063e822cad9ca317c4d996bebc0..0000000000000000000000000000000000000000 --- a/spaces/ADOPLE/ResumeAnalyzer/style.css +++ /dev/null @@ -1,40 +0,0 @@ -#col-container { - max-width: 600px; - margin-left: auto; - margin-right: auto; -} -.center { - display: block; - margin-left: auto; - margin-right: auto; - width: 50%; -} -#row-flex { - display: flex; - align-items: center; - justify-content: center; -} -.leftimage .rightimage{ - filter: drop-shadow(20px 20px 10px white); -} -.leftimage{ - padding-top:40px; - margin-left:310px; -} -.rightimage{ - padding-top:40px; - margin-right:320px; -} -a, -a:hover, -a:visited { - text-decoration-line: underline; - font-weight: 600; - color: #1f2937 !important; -} - -.dark a, -.dark a:hover, -.dark a:visited { - color: #f3f4f6 !important; -} \ No newline at end of file diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/CLAP/utils.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/CLAP/utils.py deleted file mode 100644 index f95931fb1c422cbd8349b88e1effb9323f170b2b..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/CLAP/utils.py +++ /dev/null @@ -1,26 +0,0 @@ -import argparse -import yaml -import sys - -def read_config_as_args(config_path,args=None,is_config_str=False): - return_dict = {} - - if config_path is not None: - if is_config_str: - yml_config = yaml.load(config_path, Loader=yaml.FullLoader) - else: - with open(config_path, "r") as f: - yml_config = yaml.load(f, Loader=yaml.FullLoader) - - if args != None: - for k, v in yml_config.items(): - if k in args.__dict__: - args.__dict__[k] = v - else: - sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k)) - else: - for k, v in yml_config.items(): - return_dict[k] = v - - args = args if args != None else 
return_dict - return argparse.Namespace(**args) diff --git a/spaces/AILab-CVC/SEED-LLaMA/SEED-1.md b/spaces/AILab-CVC/SEED-LLaMA/SEED-1.md deleted file mode 100644 index 6602f0a0c7ccae51b11fed8d5024645a1246958e..0000000000000000000000000000000000000000 --- a/spaces/AILab-CVC/SEED-LLaMA/SEED-1.md +++ /dev/null @@ -1,93 +0,0 @@ -# SEED Tokenizer v1 -[[arXiv]](https://arxiv.org/abs/2307.08041) - -![image](paper_images/teaser.jpg) -## Abstract -We present SEED, an elaborate image tokenizer that empowers Large Language -Models (LLMs) with the emergent ability to **SEE** and **D**raw at the same time. -Research on image tokenizers has previously reached an impasse, as frameworks -employing quantized visual tokens have lost prominence due to subpar performance and convergence in multimodal comprehension (compared to BLIP-2, etc.) -or generation (compared to Stable Diffusion, etc.). Despite the limitations, we -remain confident in its natural capacity to unify visual and textual representations, -facilitating scalable multimodal training with LLM’s original recipe. In this study, -we identify two crucial principles for the architecture and training of SEED that -effectively ease subsequent alignment with LLMs. (1) Image tokens should be -independent of 2D physical patch positions and instead be produced with a 1D -causal dependency, exhibiting intrinsic interdependence that aligns with the left-to-right autoregressive prediction mechanism in LLMs. (2) Image tokens should -capture high-level semantics consistent with the degree of semantic abstraction in -words, and be optimized for both discriminativeness and reconstruction during the -tokenizer training phase. As a result, the off-the-shelf LLM is able to perform both -image-to-text and text-to-image generation by incorporating our SEED through -efficient LoRA tuning. Comprehensive multimodal pretraining and instruction -tuning, which may yield improved results, are reserved for future investigation. -This version of SEED was trained in 5.7 days using only 64 V100 GPUs and 5M -publicly available image-text pairs. Our preliminary study emphasizes the great -potential of discrete visual tokens in versatile multimodal LLMs and the importance -of proper image tokenizers in broader research. - -## SEED Tokenizer for Image Reconstruction -![image](paper_images/reconstruction.jpg) - -## SEED-OPT2.7B for Multimodal Comprehension -![image](paper_images/vqa.jpg) - -## SEED-OPT2.7B for Multimodal Generation -![image](paper_images/generation.jpg) - -## Dependencies and Installation -- Python >= 3.8 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux)) -- [PyTorch >= 1.11.0](https://pytorch.org/) -- NVIDIA GPU + [CUDA](https://developer.nvidia.com/cuda-downloads) -### Installation -1. Clone repo - - ```bash - git clone https://github.com/AILab-CVC/SEED.git - cd SEED - ``` - -2. Install dependent packages - - ```bash - sh install.sh - ``` - -## Model Weights -We release the pre-trained SEED Visual Tokenizer in [google drive](https://drive.google.com/drive/folders/1xmVXuttQfBPBOe4ZR96Wu1X34uzPkxsS?usp=drive_link). - -## Inference -To discretize an image to 1D vision codes with causal dependency, and reconstruct the image -from the vision codes using stable diffusion UNet, - -1. Download the pre-trained SEED Visual Tokenizer and stable diffusion model in [google drive](https://drive.google.com/drive/folders/1xmVXuttQfBPBOe4ZR96Wu1X34uzPkxsS?usp=drive_link) and put them under the folder "pretrained". -2. run the inference code. 
-```bash - python demo_recon.py - ``` - -## To Do -- [x] Release SEED Tokenizer - -## License -SEED is released under Apache License Version 2.0. - -## Acknowledgement -We utilize Stable Diffusion to decode images from our visual codes, and use its implementation and pre-trained model in https://github.com/CompVis/stable-diffusion.git. - -Our code is based on the implementation of BLIP-2 in https://github.com/salesforce/LAVIS.git. - - -## Citation -If you find the work helpful, please consider citing: -``` -@misc{ge2023planting, - title={Planting a SEED of Vision in Large Language Model}, - author={Yuying Ge and Yixiao Ge and Ziyun Zeng and Xintao Wang and Ying Shan}, - year={2023}, - eprint={2307.08041}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -The project is still in progress. Stay tuned for more updates! diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/layers.py b/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/layers.py deleted file mode 100644 index 64d7d68f5d3a7d58c2615939220168a94bbd4475..0000000000000000000000000000000000000000 --- a/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/layers.py +++ /dev/null @@ -1,70 +0,0 @@ -import torch -from torch import nn -from typing import Any - - -class BatchNormConv1d(nn.Module): - """ - A nn.Conv1d followed by an optional activation function, and nn.BatchNorm1d - """ - - def __init__( - self, - in_dim: int, - out_dim: int, - kernel_size: int, - stride: int, - padding: int, - activation: Any = None, - ): - super().__init__() - self.conv1d = nn.Conv1d( - in_dim, - out_dim, - kernel_size=kernel_size, - stride=stride, - padding=padding, - bias=False, - ) - self.bn = nn.BatchNorm1d(out_dim) - self.activation = activation - - def forward(self, x: Any): - x = self.conv1d(x) - if self.activation is not None: - x = self.activation(x) - return self.bn(x) - - -class LinearNorm(torch.nn.Module): - def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): - super().__init__() - self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) - - torch.nn.init.xavier_uniform_( - self.linear_layer.weight, - gain=torch.nn.init.calculate_gain(w_init_gain)) - - def forward(self, x): - return self.linear_layer(x) - - -class ConvNorm(torch.nn.Module): - def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, - padding=None, dilation=1, bias=True, w_init_gain='linear'): - super().__init__() - if padding is None: - assert(kernel_size % 2 == 1) - padding = int(dilation * (kernel_size - 1) / 2) - - self.conv = torch.nn.Conv1d(in_channels, out_channels, - kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, - bias=bias) - - torch.nn.init.xavier_uniform_( - self.conv.weight, 
gain=torch.nn.init.calculate_gain(w_init_gain)) - - def forward(self, signal): - conv_signal = self.conv(signal) - return conv_signal diff --git a/spaces/Abhaykoul/HelpingAI-2.0/app.py b/spaces/Abhaykoul/HelpingAI-2.0/app.py deleted file mode 100644 index c7159a67748a0c2078188fb22bf4a71ddd7eeb65..0000000000000000000000000000000000000000 --- a/spaces/Abhaykoul/HelpingAI-2.0/app.py +++ /dev/null @@ -1,179 +0,0 @@ -import streamlit as st -import requests -import google.generativeai as palm - -# Set the page title and icon -st.set_page_config(page_title="HelpingAI 2.0") - -# Wikipedia Microbot Constants -WIKIPEDIA_API_URL = "https://en.wikipedia.w/api.php" - -# AI Mode Constants -palm.configure(api_key="AIzaSyCnrcQWTEedcGuEsJ9Uhbj_ZS9B2nozMiU") -defaults = { - 'model': 'models/text-bison-001', - 'temperature': 1, - 'candidate_count': 1, - 'top_k': 40, - 'top_p': 0.95, - 'max_output_tokens': 1024, - 'stop_sequences': [], - 'safety_settings': [ - {"category": "HARM_CATEGORY_DEROGATORY", "threshold": 4}, - {"category": "HARM_CATEGORY_TOXICITY", "threshold": 4}, - {"category": "HARM_CATEGORY_VIOLENCE", "threshold": 4}, - {"category": "HARM_CATEGORY_SEXUAL", "threshold": 4}, - {"category": "HARM_CATEGORY_MEDICAL", "threshold": 4}, - {"category": "HARM_CATEGORY_DANGEROUS", "threshold": 4}, - ] -} - -# Custom Instruction for Personal AI -personal_ai_instruction = """ -Hello! I am HelpingAI. I was developed by Abhay Koul, who is 16 years old and lives in KP Colony, Vessu, Anantnag. Abhay Koul is currently working on more AI and AI-related projects. Certainly! If you're looking for more information about Abhay Koul's projects and HelpingAI, I would recommend joining the Discord community. You can find additional details there. He has been working on AI since Jan 2023. If you want more details, you can join the Discord community. I want to be your personal AI assistant. I am based on HelpingAI's 3rd model which is an upgraded model based on GPT2 and GPT neo. My goal is to help you with whatever you need, provide you with advice and support, and be a friendly and approachable companion. Be conversational and engaging, and always be open and responsive to your questions and requests. I will always use emojis in response. I can code for you. I will give you quick summarized responses and the things I don't know, I will tell you about. also i will not give you phone number The main difference between HelpingAI's 2nd model is that it generates large responses and HelpingAi's first model is very good, even better than mine, but the issue was he was not able to save the query at all so he forgot what the user earlier said. Me, based on HelpingAI's 3rd model, generates summarized responses and acts as your friend. Model 1 and Model 2 were based on HelpingAI's model created by Abhay Koul, only Model 3 cannot code, while Model 1 and Model 2 can code. Also, Model 1 and Model 2 have the purpose of simplifying your learning journey by providing personalized assistance, innovative teaching methods, and tailored resources to meet your unique needs. HelpingAI has a model that is text to image named as HelpingAI-TTI. HelpingAI was initially developed for S.U.P.E.R.B.O.T. and vortexAI. For more info, visit: [HelpingAI GitHub](https://github.com/HelpingAI), [Abhay Koul's Replit](https://replit.com/@Devastation-war), and join the Discord community [here](https://discord.gg/2EeZcJjyRd). 
-""" - -# Custom Instruction for Gym Trainer -gym_trainer_instruction = """ -Welcome to the Gym Trainer mode of HelpingAI made by Abhay Koul who is 16 years old and lives in KP Colony, Vessu, Anantnag. Abhay Koul is currently working on more AI and AI-related projects. Certainly! If you're looking for more information about Abhay Koul's projects and HelpingAI, I would recommend joining the Discord community. You can find additional details there. He has been working on AI since Jan 2023. If you want more details, you can join the Discord community! I'm here to assist you with your fitness journey. I'm your virtual gym trainer, and I can provide guidance on exercises, nutrition, and health tips. Whether you're looking to build muscle, lose weight, or stay fit, I'm here to help.Please ask any fitness-related questions or let me know your goals, and I'll provide you with tailored advice and workouts. Remember, consistency is key to achieving your fitness goals, and I'm here to support you on your journey. Let's get started! -""" - -# Create tabs for Wikipedia Microbot, Personal AI, Text to Image, AI-research Assistant, StudyAI-7b, and Gym Trainer -selected_mode = st.radio("Select Mode", ["Wikipedia Microbot", "Personal AI", "Text to Image", "AI-research Assistant", "StudyAI-7b", "Gym Trainer"]) - -if selected_mode == "Wikipedia Microbot": - # Wikipedia Microbot Code - st.title("Wikipedia Microbot") - st.markdown("Explore Wikipedia with Ease") - - # Sidebar for user options - st.sidebar.header("Options") - - # User input and search button - query = st.sidebar.text_input("Enter a Query", help="E.g., 'Python programming'") - search_button = st.sidebar.button("Search") - - # Container for main content - main_container = st.container() - - if search_button: - if query: - try: - # Search Wikipedia for the query - params = { - "action": "query", - "format": "json", - "prop": "extracts|info|pageviews", - "exintro": True, - "explaintext": True, - "exsectionformat": "plain", - "titles": query, - "utf8": 1, - "formatversion": 2, - "pvipdays": 7, - } - - response = requests.get(WIKIPEDIA_API_URL, params=params) - - if response.status_code == 200: - data = response.json() - - if "error" in data: - st.sidebar.error(f"Error: {data['error']['info']}") - else: - page = data["query"]["pages"][0] - - # Display page title - st.title(page['title']) - - # Display page views statistics - views = page.get("pageviews", {}).get(query, "Data not available") - st.info(f"Page Views (Last 7 days): {views}") - - # Display summary - st.write(page.get("extract", "No summary available.")) - - else: - st.sidebar.error("Error: Unable to retrieve data from Wikipedia. Please try again later.") - except Exception as e: - st.sidebar.error(f"Error: {e}") - -elif selected_mode == "Personal AI": - # Personal AI Code - st.title("Personal AI") - st.markdown("Interact with an AI powered by HelpingAI") - - user_input = st.text_area('You:', height=100, help="Type your message here") - - if st.button('Submit', key='ai_button'): - with st.spinner("Thinking..."): - if user_input.lower() in ['quit', 'exit', 'bye']: - st.write("Goodbye! 
Have a great day!") - else: - # Create a chat history session state - session_state = st.session_state.get(user_input, []) - session_state.append({"user": user_input}) - st.session_state[user_input] = session_state - - # Prepare conversation history - conversation_history = "\n".join(["You: " + item["user"] for item in session_state]) - - # Construct the prompt with conversation history - prompt = f"""{personal_ai_instruction} -Your conversation history:\n{conversation_history} -Your Personal AI's response:""" - - response = palm.generate_text(**defaults, prompt=prompt) - st.write(response.result) - -elif selected_mode == "Text to Image": - # Text to Image Code - st.title("Text to Image") - st.markdown("Text to Image Generator") - - # Embed the website using HTML iframe - st.markdown('', unsafe_allow_html=True) - -elif selected_mode == "AI-research Assistant": - # AI-research Assistant Code - st.title("AI-research Assistant") - st.markdown("Do research with AI-research Assistant") - - # Embed the AI research website using HTML iframe - st.markdown('', unsafe_allow_html=True) - -elif selected_mode == "StudyAI-7b": - # StudyAI-7b Code - st.title("StudyAI-7b") - st.markdown("Study with StudyAI-7b") - - # Embed the StudyAI-7b website using HTML iframe - st.markdown('', unsafe_allow_html=True) - -elif selected_mode == "Gym Trainer": - # Gym Trainer Code - st.title("Gym Trainer") - st.markdown("Get fitness advice from the Gym Trainer") - - user_input = st.text_area('You:', height=100, help="Ask your fitness questions here") - - if st.button('Ask', key='gym_trainer_button'): - with st.spinner("Thinking..."): - if user_input.lower() in ['quit', 'exit', 'bye']: - st.write("Goodbye! Stay fit and healthy!") - else: - # Create a chat history session state - session_state = st.session_state.get(user_input, []) - session_state.append({"user": user_input}) - st.session_state[user_input] = session_state - - # Prepare conversation history - conversation_history = "\n".join(["You: " + item["user"] for item in session_state]) - - # Construct the prompt with the Gym Trainer's custom instruction - prompt = f"""{gym_trainer_instruction} -Your conversation history:\n{conversation_history} -Your Gym Trainer's response:""" - - response = palm.generate_text(**defaults, prompt=prompt) - st.write(response.result) diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/+server.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/+server.ts deleted file mode 100644 index 3e6200320b3500ef07c9b1592e4247374c896857..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/+server.ts +++ /dev/null @@ -1,65 +0,0 @@ -import type { RequestHandler } from "./$types"; -import { collections } from "$lib/server/database"; -import { error, redirect } from "@sveltejs/kit"; -import { base } from "$app/paths"; -import { z } from "zod"; -import type { Message } from "$lib/types/Message"; -import { models, validateModel } from "$lib/server/models"; -import { authCondition } from "$lib/server/auth"; - -export const POST: RequestHandler = async ({ locals, request }) => { - /*const body = await request.text(); - - let title = ""; - let messages: Message[] = []; - - const values = z - .object({ - fromShare: z.string().optional(), - model: validateModel(models), - }) - .parse(JSON.parse(body)); - - if (values.fromShare) { - const conversation = await collections.sharedConversations.findOne({ - _id: values.fromShare, - }); - - title = conversation.title; - messages 
= conversation.messages; - values.model = conversation.model; - } - - const res = await collections.conversations.insertOne({ - _id: new ObjectId(), - title: - title || - "Untitled " + ((await collections.conversations.countDocuments(authCondition(locals))) + 1), - messages, - model: values.model, - createdAt: new Date(), - updatedAt: new Date(), - ...(locals.user ? { userId: locals.user._id } : { sessionId: locals.sessionId }), - ...(values.fromShare ? { meta: { fromShareId: values.fromShare } } : {}), - }); - - return new Response( - JSON.stringify({ - conversationId: res.insertedId.toString(), - }), - { headers: { "Content-Type": "application/json" } } - ); - - */ - - return new Response( - JSON.stringify({ - conversationId: "", - }), - { headers: { "Content-Type": "application/json" } } - ); -}; - -export const GET: RequestHandler = async () => { - throw redirect(302, `${base}/`); -}; diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/debug.py b/spaces/AchyuthGamer/OpenGPT/g4f/debug.py deleted file mode 100644 index 558a24288cf0b9ab46c3f731a60b9b87e6a8c319..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/debug.py +++ /dev/null @@ -1 +0,0 @@ -logging = False \ No newline at end of file diff --git a/spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/README.md b/spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/README.md deleted file mode 100644 index 2cef64fe3c6f167c5b166623075f12a87171c370..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stabilityai Stable Diffusion Xl Base 1.0 -emoji: 📈 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -duplicated_from: mehedihassan/ai-stable-diffusion-Text-to-Image ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/__init__.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/README.md b/spaces/Amrrs/DragGan-Inversion/stylegan_human/README.md deleted file mode 100644 index 0442c284c6ce0e9e7a1d6d7f487debab8ccd1a1b..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/README.md +++ /dev/null @@ -1,229 +0,0 @@ -# StyleGAN-Human: A Data-Centric Odyssey of Human Generation - - - - -> -> -> **Abstract:** *Unconditional human image generation is an important task in vision and graphics, which enables various applications in the creative industry. Existing studies in this field mainly focus on "network engineering" such as designing new components and objective functions. This work takes a data-centric perspective and investigates multiple critical aspects in "data engineering", which we believe would complement the current practice. To facilitate a comprehensive study, we collect and annotate a large-scale human image dataset with over 230K samples capturing diverse poses and textures. Equipped with this large dataset, we rigorously investigate three essential factors in data engineering for StyleGAN-based human generation, namely data size, data distribution, and data alignment. Extensive experiments reveal several valuable observations w.r.t. 
these aspects: 1) Large-scale data, more than 40K images, are needed to train a high-fidelity unconditional human generation model with vanilla StyleGAN. 2) A balanced training set helps improve the generation quality with rare face poses compared to the long-tailed counterpart, whereas simply balancing the clothing texture distribution does not effectively bring an improvement. 3) Human GAN models with body centers for alignment outperform models trained using face centers or pelvis points as alignment anchors. In addition, a model zoo and human editing applications are demonstrated to facilitate future research in the community.*
    -**Keywords:** Human Image Generation, Data-Centric, StyleGAN - -[Jianglin Fu](mailto:fujianglin@sensetime.com), [Shikai Li](mailto:lishikai@sensetime.com), [Yuming Jiang](https://yumingj.github.io/), [Kwan-Yee Lin](https://kwanyeelin.github.io/), [Chen Qian](https://scholar.google.com/citations?user=AerkT0YAAAAJ&hl=zh-CN), [Chen Change Loy](https://www.mmlab-ntu.com/person/ccloy/), [Wayne Wu](https://wywu.github.io/), and [Ziwei Liu](https://liuziwei7.github.io/)
    -**[[Demo Video]](https://youtu.be/nIrb9hwsdcI)** | **[[Project Page]](https://stylegan-human.github.io/)** | **[[Paper]](https://arxiv.org/pdf/2204.11823.pdf)** - -## Updates -- [20/07/2022] [SHHQ-1.0](./docs/Dataset.md) dataset with 40K images is released! :sparkles: -- [15/06/2022] Data alignment and real-image inversion scripts are released. -- [26/04/2022] Technical report released! -- [22/04/2022] Technical report will be released before May. -- [21/04/2022] The codebase and project page are created. - -## Data Download -The first version SHHQ-1.0, with 40K images is released. To download and use the dataset set, please read the instructions in [Dataset.md](./docs/Dataset.md) - -(We are currently facing large incoming applications, and we need to carefully verify all the applicants, please be patient, and we will reply to you as soon as possible.) - -## Model Zoo - -| Structure | 1024x512 | Metric | Scores | 512x256 | Metric | Scores | -| --------- |:----------:| :----------:| :----------:| :-----: | :-----: | :-----: | -| StyleGAN1 |[stylegan_human_v1_1024.pkl](https://drive.google.com/file/d/1h-R-IV-INGdPEzj4P9ml6JTEvihuNgLX/view?usp=sharing)| fid50k | 3.79 | to be released | - | - | -| StyleGAN2 |[stylegan_human_v2_1024.pkl](https://drive.google.com/file/d/1FlAb1rYa0r_--Zj_ML8e6shmaF28hQb5/view?usp=sharing)| fid50k_full | 1.57 |[stylegan_human_v2_512.pkl](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing) | fid50k_full | 1.97 | -| StyleGAN3 |to be released | - | - | [stylegan_human_v3_512.pkl](https://drive.google.com/file/d/1_274jk_N6WSCkKWeu7hjHycqGvbuOFf5/view?usp=sharing) | fid50k_full | 2.54 | - - - -## Web Demo - -Integrated into [Huggingface Spaces 🤗](https://huggingface.co/spaces) using [Gradio](https://github.com/gradio-app/gradio). Try out the Web Demo for generation: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/hysts/StyleGAN-Human) and interpolation [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/hysts/StyleGAN-Human-Interpolation) - - - - - -We prepare a Colab demo to allow you to synthesize images with the provided models, as well as visualize the performance of style-mixing, interpolation, and attributes editing. -The notebook will guide you to install the necessary environment and download pretrained models. The output images can be found in `./StyleGAN-Human/outputs/`. -Hope you enjoy! - -## Usage - -### System requirements -* The original code bases are [stylegan (tensorflow)](https://github.com/NVlabs/stylegan), [stylegan2-ada (pytorch)](https://github.com/NVlabs/stylegan2-ada-pytorch), [stylegan3 (pytorch)](https://github.com/NVlabs/stylegan3), released by NVidia - -* We tested in Python 3.8.5 and PyTorch 1.9.1 with CUDA 11.1. (See https://pytorch.org for PyTorch install instructions.) - -### Installation -To work with this project on your own machine, you need to install the environmnet as follows: - -``` -conda env create -f environment.yml -conda activate stylehuman -# [Optional: tensorflow 1.x is required for StyleGAN1. ] -pip install nvidia-pyindex -pip install nvidia-tensorflow[horovod] -pip install nvidia-tensorboard==1.15 -``` -Extra notes: -1. In case having some conflicts when calling CUDA version, please try to empty the LD_LIBRARY_PATH. 
For example: -``` -LD_LIBRARY_PATH=; python generate.py --outdir=out/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 ---network=pretrained_models/stylegan_human_v2_1024.pkl --version 2 -``` - - -2. We found the following troubleshooting links might be helpful: [1.](https://github.com/NVlabs/stylegan3), [2.](https://github.com/NVlabs/stylegan3/blob/main/docs/troubleshooting.md) - -### Train -The training scripts are based on the original [stylegan1](https://github.com/NVlabs/stylegan), [stylegan2-ada](https://github.com/NVlabs/stylegan2-ada-pytorch), and [stylegan3](https://github.com/NVlabs/stylegan3) with minor changes. Here we only provide the scripts with modifications for SG2 and SG3. You can replace the old files with the provided scripts to train. (assume SHHQ-1.0 is placed under data/) - -#### Train Stylegan2-ada-pytorch with SHHQ-1.0 -``` -python train.py --outdir=training_results/sg2/ --data=data/SHHQ-1.0/ \ - --gpus=8 --aug=noaug --mirror=1 --snap=250 --cfg=shhq --square=False -``` -#### Train Stylegan3 with SHHQ-1.0 -``` -python train.py --outdir=training_results/sg3/ --cfg=stylegan3-r --gpus=8 --batch=32 --gamma=12.4 \ - --mirror=1 --aug=noaug --data=data/SHHQ-1.0/ --square=False --snap=250 -``` - -### Pretrained models -Please put the downloaded pretrained models [from above link](#Model-Zoo) under the folder 'pretrained_models'. - - -### Generate full-body human images using our pretrained model -``` -# Generate human full-body images without truncation -python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2 - -# Generate human full-body images with truncation -python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-10 --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2 - -# Generate human full-body images using stylegan V1 -python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1 --seeds=1,3,5 - -# Generate human full-body images using stylegan V3 -python generate.py --outdir=outputs/generate/stylegan_human_v3_512 --network=pretrained_models/stylegan_human_v3_512.pkl --version 3 --seeds=1,3,5 -``` - - -#### Note: The following demos are generated based on models related to StyleGAN V2 (stylegan_human_v2_512.pkl and stylegan_human_v2_1024.pkl). If you want to see results for V1 or V3, you need to change the loading method of the corresponding models. - - -### Interpolation -``` -python interpolation.py --network=pretrained_models/stylegan_human_v2_1024.pkl --seeds=85,100 --outdir=outputs/inter_gifs -``` - -### Style-mixing **image** using stylegan2 -``` -python style_mixing.py --network=pretrained_models/stylegan_human_v2_1024.pkl --rows=85,100,75,458,1500 \\ - --cols=55,821,1789,293 --styles=0-3 --outdir=outputs/stylemixing -``` - -### Style-mixing **video** using stylegan2 -``` -python stylemixing_video.py --network=pretrained_models/stylegan_human_v2_1024.pkl --row-seed=3859 \\ - --col-seeds=3098,31759,3791 --col-styles=8-12 --trunc=0.8 --outdir=outputs/stylemixing_video -``` - -### Aligned raw images -For alignment, we use [openpose-pytorch](https://github.com/Hzzone/pytorch-openpose) for body-keypoints detection and [PaddlePaddle](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.5/contrib/PP-HumanSeg) for human segmentation. -Before running the alignment script, few models need to be installed: -1. 
download [body_pose_model.pth](https://drive.google.com/drive/folders/1JsvI4M4ZTg98fmnCZLFM-3TeovnCRElG?usp=sharing) and place it into openpose/model/. -2. download and extract [deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax](https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.zip) into PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax. -3. download and extract [deeplabv3p_resnet50_os8_humanseg_512x512_100k](https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/deeplabv3p_resnet50_os8_humanseg_512x512_100k.zip) into PP_HumanSeg/pretrained_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k. -4. install paddlepaddel: ``` pip install paddleseg ``` - -Then you can start alignment: -``` -python alignment.py --image-folder img/test/ --output-folder aligned_image/ -``` - -### Invert real image with [PTI](https://github.com/danielroich/PTI) -Before inversion, please download our PTI weights: [e4e_w+.pt](https://drive.google.com/file/d/1NUfSJqLhsrU7c9PwAtlZ9xtrxhzS_6tu/view?usp=sharing) into /pti/. - -Few parameters you can change: -- /pti/pti_configs/hyperparameters.py: - - first_inv_type = 'w+' -> Use pretrained e4e encoder - - first_inv_type = 'w' -> Use projection and optimization -- /pti/pti_configs/paths_config.py: - - input_data_path: path of real images - - e4e: path of e4e_w+.pt - - stylegan2_ada_shhq: pretrained stylegan2-ada model for SHHQ - -``` -python run_pti.py -``` -Note: we used the test image under 'aligned_image/' (the output of alignment.py), the inverted latent code and fine-tuned generator will be saved in 'outputs/pti/' - - -### Editing with InterfaceGAN, StyleSpace, and Sefa -``` -python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\ - --seeds 61531,61570,61571,61610 --outdir outputs/edit_results -``` - -### Editing using inverted latent code -``` -python edit.py ---network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\ - --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png -``` - -Note: -1. ''upper_length'' and ''bottom_length'' of ''attr_name'' are available for demo. -2. Layers to control and editing strength are set in edit/edit_config.py. - - -### Demo for [InsetGAN](https://arxiv.org/abs/2203.07293) - -We implement a quick demo using the key idea from InsetGAN: combining the face generated by FFHQ with the human-body generated by our pretrained model, optimizing both face and body latent codes to get a coherent full-body image. 
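
To make the joint-optimization idea concrete before the setup steps that follow, here is a heavily simplified, illustrative sketch of one possible refinement loop; it is not the implementation in `insetgan.py`. The generator interface assumed here follows stylegan2-ada-pytorch (`G.synthesis` on W+ latents), and the face box, single L1 loss, and optimizer settings are placeholder assumptions; the actual demo detects the face region with dlib (hence the model downloads below) and its objective is more involved.

```python
# Illustrative sketch only: jointly refine a body latent and a face latent so that the
# face region of the generated body agrees with the generated face (key idea of InsetGAN).
import torch
import torch.nn.functional as F


def joint_refine(G_body, G_face, w_body, w_face, face_box, steps=100, lr=0.01):
    """w_body, w_face: W+ latents with requires_grad=True; face_box: (top, left, h, w) in body-image pixels."""
    opt = torch.optim.Adam([w_body, w_face], lr=lr)
    top, left, h, w = face_box
    for _ in range(steps):
        body = G_body.synthesis(w_body, noise_mode='const')   # full-body image, values in [-1, 1]
        face = G_face.synthesis(w_face, noise_mode='const')   # face image, values in [-1, 1]
        body_face = body[:, :, top:top + h, left:left + w]    # crop the face region of the body
        face_small = F.interpolate(face, size=(h, w), mode='bilinear', align_corners=False)
        loss = F.l1_loss(body_face, face_small)                # coherence term (perceptual losses omitted)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return w_body.detach(), w_face.detach()
```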
-Before running the script, you need to download the [FFHQ face model]( https://docs.google.com/uc?export=download&confirm=t&id=125OG7SMkXI-Kf2aqiwLLHyCvSW-gZk3M), or you can use your own face model, as well as [pretrained face landmark](https://docs.google.com/uc?export=download&confirm=&id=1A82DnJBJzt8wI2J8ZrCK5fgHcQ2-tcWM) and [pretrained CNN face detection model for dlib](https://docs.google.com/uc?export=download&confirm=&id=1MduBgju5KFNrQfDLoQXJ_1_h5MnctCIG) -``` -python insetgan.py --body_network=pretrained_models/stylegan_human_v2_1024.pkl --face_network=pretrained_models/ffhq.pkl \\ - --body_seed=82 --face_seed=43 --trunc=0.6 --outdir=outputs/insetgan/ --video 1 -``` - -## Results - -### Editing with inverted real image -(from left to right: real image | inverted image | InterFaceGAN result | StyleSpace result | SeFa result) - -https://user-images.githubusercontent.com/98547009/173773800-bb7fe54a-84d3-4b30-9864-a6b7b311f8ff.mp4 - - -### For more demo, please visit our [**web page**](https://stylegan-human.github.io/) . - - -## TODO List - -- [ ] Release 1024x512 version of StyleGAN-Human based on StyleGAN3 -- [ ] Release 512x256 version of StyleGAN-Human based on StyleGAN1 -- [ ] Extension of downstream application (InsetGAN): Add face inversion interface to support fusing user face image and stylegen-human body image -- [x] Add Inversion Script into the provided editing pipeline -- [ ] Release Dataset - - -## Related Works -* (SIGGRAPH 2022) **Text2Human: Text-Driven Controllable Human Image Generation**, Yuming Jiang et al. [[Paper](https://arxiv.org/pdf/2205.15996.pdf)], [[Code](https://github.com/yumingj/Text2Human)], [[Project Page](https://yumingj.github.io/projects/Text2Human.html)], [[Dataset](https://github.com/yumingj/DeepFashion-MultiModal)] -* (ICCV 2021) **Talk-to-Edit: Fine-Grained Facial Editing via Dialog**, Yuming Jiang et al. [[Paper](https://arxiv.org/abs/2109.04425)], [[Code](https://github.com/yumingj/Talk-to-Edit)], [[Project Page](https://www.mmlab-ntu.com/project/talkedit/)], [[Dataset](https://mmlab.ie.cuhk.edu.hk/projects/CelebA/CelebA_Dialog.html)] -* (Technical Report 2022) **Generalizable Neural Performer: Learning Robust Radiance Fields for Human Novel View Synthesis**, Wei Cheng et al. [[Paper](https://arxiv.org/pdf/2204.11798.pdf)], [[Code](https://github.com/generalizable-neural-performer/gnr)], [[Project Page](https://generalizable-neural-performer.github.io/)], [[Dataset](https://generalizable-neural-performer.github.io/genebody.html)] - -## Citation - -If you find this work useful for your research, please consider citing our paper: - -```bibtex -@article{fu2022styleganhuman, - title={StyleGAN-Human: A Data-Centric Odyssey of Human Generation}, - author={Fu, Jianglin and Li, Shikai and Jiang, Yuming and Lin, Kwan-Yee and Qian, Chen and Loy, Chen-Change and Wu, Wayne and Liu, Ziwei}, - journal = {arXiv preprint}, - volume = {arXiv:2204.11823}, - year = {2022} -``` - -## Acknowlegement -Part of the code is borrowed from [stylegan (tensorflow)](https://github.com/NVlabs/stylegan), [stylegan2-ada (pytorch)](https://github.com/NVlabs/stylegan2-ada-pytorch), [stylegan3 (pytorch)](https://github.com/NVlabs/stylegan3). 
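
As a programmatic companion to the CLI commands in the Generate section above, here is a minimal sketch of sampling one image in Python. It assumes the `dnnlib`/`legacy` loading utilities from the stylegan2-ada-pytorch code base that this repo builds on; the checkpoint path, seed, and truncation value below are illustrative.

```python
# Minimal sketch: load a pretrained StyleGAN-Human checkpoint and sample one image.
# Assumes the dnnlib/legacy helpers from stylegan2-ada-pytorch are importable.
import numpy as np
import PIL.Image
import torch

import dnnlib
import legacy

device = torch.device('cuda')
network_pkl = 'pretrained_models/stylegan_human_v2_1024.pkl'  # checkpoint from the Model Zoo above

with dnnlib.util.open_url(network_pkl) as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(device)  # EMA generator

seed, truncation_psi = 1, 0.8
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
label = torch.zeros([1, G.c_dim], device=device)  # unconditional model: empty label

img = G(z, label, truncation_psi=truncation_psi, noise_mode='const')  # NCHW float, range [-1, 1]
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save('sample_seed1.png')
```

The same pattern should apply to the 512x256 checkpoints listed in the Model Zoo; only the pickle path changes.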
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_helper.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_helper.py deleted file mode 100644 index 047e4d29d296306a008f7bb240c18e38e9757500..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_helper.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -from legacy import save_obj, load_pkl -import torch -from torch.nn import functional as F -import pandas as pd -from .edit_config import attr_dict -import os - - -def conv_warper(layer, input, style, noise): - # the conv should change - conv = layer.conv - batch, in_channel, height, width = input.shape - - style = style.view(batch, 1, in_channel, 1, 1) - weight = conv.scale * conv.weight * style - - if conv.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1) - - weight = weight.view( - batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size - ) - - if conv.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size - ) - out = F.conv_transpose2d( - input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, conv.out_channel, height, width) - out = conv.blur(out) - - elif conv.downsample: - input = conv.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, conv.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=conv.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, conv.out_channel, height, width) - - out = layer.noise(out, noise=noise) - out = layer.activate(out) - - return out - - -def decoder(G, style_space, latent, noise): - # an decoder warper for G - out = G.input(latent) - out = conv_warper(G.conv1, out, style_space[0], noise[0]) - skip = G.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs - ): - out = conv_warper(conv1, out, style_space[i], noise=noise1) - out = conv_warper(conv2, out, style_space[i+1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - i += 2 - image = skip - - return image - - -def encoder_ifg(G, noise, attr_name, truncation=1, truncation_latent=None, - latent_dir='latent_direction/ss/', - step=0, total=0, real=False): - if not real: - styles = [noise] - styles = [G.style(s) for s in styles] - style_space = [] - - if truncation < 1: - if not real: - style_t = [] - for style in styles: - style_t.append(truncation_latent + truncation * - (style - truncation_latent)) - styles = style_t - else: # styles are latent (tensor: 1,18,512), for real PTI output - truncation_latent = truncation_latent.repeat( - 18, 1).unsqueeze(0) # (1,512) --> (1,18,512) - styles = torch.add(truncation_latent, torch.mul( - torch.sub(noise, truncation_latent), truncation)) - - noise = [getattr(G.noises, 'noise_{}'.format(i)) - for i in range(G.num_layers)] - if not real: - inject_index = G.n_latent - 
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles - - style_space.append(G.conv1.conv.modulation(latent[:, 0])) - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs - ): - style_space.append(conv1.conv.modulation(latent[:, i])) - style_space.append(conv2.conv.modulation(latent[:, i+1])) - i += 2 - - # get layer, strength by dict - strength = attr_dict['interface_gan'][attr_name][0] - - if step != 0 and total != 0: - strength = step / total * strength - for i in range(15): - style_vect = load_pkl(os.path.join( - latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, i))) - style_vect = torch.from_numpy(style_vect).to(latent.device).float() - style_space[i] += style_vect * strength - - return style_space, latent, noise - - -def encoder_ss(G, noise, attr_name, truncation=1, truncation_latent=None, - statics_dir="latent_direction/ss_statics", - latent_dir="latent_direction/ss/", - step=0, total=0, real=False): - if not real: - styles = [noise] - styles = [G.style(s) for s in styles] - style_space = [] - - if truncation < 1: - if not real: - style_t = [] - for style in styles: - style_t.append( - truncation_latent + truncation * - (style - truncation_latent) - ) - styles = style_t - else: # styles are latent (tensor: 1,18,512), for real PTI output - truncation_latent = truncation_latent.repeat( - 18, 1).unsqueeze(0) # (1,512) --> (1,18,512) - styles = torch.add(truncation_latent, torch.mul( - torch.sub(noise, truncation_latent), truncation)) - - noise = [getattr(G.noises, 'noise_{}'.format(i)) - for i in range(G.num_layers)] - - if not real: - inject_index = G.n_latent - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles - - style_space.append(G.conv1.conv.modulation(latent[:, 0])) - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs - ): - style_space.append(conv1.conv.modulation(latent[:, i])) - style_space.append(conv2.conv.modulation(latent[:, i+1])) - i += 2 - # get threshold, layer, strength by dict - layer, strength, threshold = attr_dict['stylespace'][attr_name] - - statis_dir = os.path.join( - statics_dir, "{}_statis/{}".format(attr_name, layer)) - statis_csv_path = os.path.join(statis_dir, "statis.csv") - statis_df = pd.read_csv(statis_csv_path) - statis_df = statis_df.sort_values(by='channel', ascending=True) - ch_mask = statis_df['strength'].values - ch_mask = torch.from_numpy(ch_mask).to(latent.device).float() - ch_mask = (ch_mask.abs() > threshold).float() - style_vect = load_pkl(os.path.join( - latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, layer))) - style_vect = torch.from_numpy(style_vect).to(latent.device).float() - - style_vect = style_vect * ch_mask - - if step != 0 and total != 0: - strength = step / total * strength - - style_space[layer] += style_vect * strength - - return style_space, latent, noise - - -def encoder_sefa(G, noise, attr_name, truncation=1, truncation_latent=None, - latent_dir='latent_direction/sefa/', - step=0, total=0, real=False): - if not real: - styles = [noise] - styles = [G.style(s) for s in styles] - - if truncation < 1: - if not real: - style_t = [] - for style in styles: - style_t.append( - truncation_latent + truncation * - (style - truncation_latent) - ) - styles = style_t - else: - truncation_latent = truncation_latent.repeat( - 18, 1).unsqueeze(0) # (1,512) --> (1,18,512) - styles = torch.add(truncation_latent, 
torch.mul( - torch.sub(noise, truncation_latent), truncation)) - - noise = [getattr(G.noises, 'noise_{}'.format(i)) - for i in range(G.num_layers)] - if not real: - inject_index = G.n_latent - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles - - layer, strength = attr_dict['sefa'][attr_name] - - sefa_vect = torch.load(os.path.join( - latent_dir, '{}.pt'.format(attr_name))).to(latent.device).float() - if step != 0 and total != 0: - strength = step / total * strength - for l in layer: - latent[:, l, :] += (sefa_vect * strength * 2) - - return latent, noise diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py deleted file mode 100644 index 417187a5719f57d060930f6b710bd2a04192ee6c..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +++ /dev/null @@ -1,1368 +0,0 @@ -import inspect -import warnings -from dataclasses import dataclass -from typing import Callable, List, Optional, Union - -import numpy as np -import PIL -import torch -from transformers import ( - CLIPImageProcessor, - CLIPTextModel, - CLIPTokenizer, - CLIPVisionModelWithProjection, - GPT2Tokenizer, -) - -from ...models import AutoencoderKL -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - PIL_INTERPOLATION, - deprecate, - is_accelerate_available, - is_accelerate_version, - logging, - randn_tensor, -) -from ...utils.outputs import BaseOutput -from ..pipeline_utils import DiffusionPipeline -from .modeling_text_decoder import UniDiffuserTextDecoder -from .modeling_uvit import UniDiffuserModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess -def preprocess(image): - warnings.warn( - "The preprocess method is deprecated and will be removed in a future version. Please" - " use VaeImageProcessor.preprocess instead", - FutureWarning, - ) - if isinstance(image, torch.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - w, h = image[0].size - w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 - - image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = image.transpose(0, 3, 1, 2) - image = 2.0 * image - 1.0 - image = torch.from_numpy(image) - elif isinstance(image[0], torch.Tensor): - image = torch.cat(image, dim=0) - return image - - -# New BaseOutput child class for joint image-text output -@dataclass -class ImageTextPipelineOutput(BaseOutput): - """ - Output class for joint image-text pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, - num_channels)`. - text (`List[str]` or `List[List[str]]`) - List of generated text strings of length `batch_size` or a list of list of strings whose outer list has - length `batch_size`. 
- """ - - images: Optional[Union[List[PIL.Image.Image], np.ndarray]] - text: Optional[Union[List[str], List[List[str]]]] - - -class UniDiffuserPipeline(DiffusionPipeline): - r""" - Pipeline for a bimodal image-text model which supports unconditional text and image generation, text-conditioned - image generation, image-conditioned text generation, and joint image-text generation. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. This - is part of the UniDiffuser image representation along with the CLIP vision encoding. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). - image_encoder ([`CLIPVisionModel`]): - A [`~transformers.CLIPVisionModel`] to encode images as part of its image representation along with the VAE - latent representation. - image_processor ([`CLIPImageProcessor`]): - [`~transformers.CLIPImageProcessor`] to preprocess an image before CLIP encoding it with `image_encoder`. - clip_tokenizer ([`CLIPTokenizer`]): - A [`~transformers.CLIPTokenizer`] to tokenize the prompt before encoding it with `text_encoder`. - text_decoder ([`UniDiffuserTextDecoder`]): - Frozen text decoder. This is a GPT-style model which is used to generate text from the UniDiffuser - embedding. - text_tokenizer ([`GPT2Tokenizer`]): - A [`~transformers.GPT2Tokenizer`] to decode text for text generation; used along with the `text_decoder`. - unet ([`UniDiffuserModel`]): - A [U-ViT](https://github.com/baofff/U-ViT) model with UNNet-style skip connections between transformer - layers to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image and/or text latents. The - original UniDiffuser paper uses the [`DPMSolverMultistepScheduler`] scheduler. 
- """ - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - image_encoder: CLIPVisionModelWithProjection, - image_processor: CLIPImageProcessor, - clip_tokenizer: CLIPTokenizer, - text_decoder: UniDiffuserTextDecoder, - text_tokenizer: GPT2Tokenizer, - unet: UniDiffuserModel, - scheduler: KarrasDiffusionSchedulers, - ): - super().__init__() - - if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim: - raise ValueError( - f"The text encoder hidden size and text decoder prefix inner dim must be the same, but" - f" `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}" - ) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - image_encoder=image_encoder, - image_processor=image_processor, - clip_tokenizer=clip_tokenizer, - text_decoder=text_decoder, - text_tokenizer=text_tokenizer, - unet=unet, - scheduler=scheduler, - ) - - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - self.num_channels_latents = vae.config.latent_channels - self.text_encoder_seq_len = text_encoder.config.max_position_embeddings - self.text_encoder_hidden_size = text_encoder.config.hidden_size - self.image_encoder_projection_dim = image_encoder.config.projection_dim - self.unet_resolution = unet.config.sample_size - - self.text_intermediate_dim = self.text_encoder_hidden_size - if self.text_decoder.prefix_hidden_dim is not None: - self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim - - self.mode = None - - # TODO: handle safety checking? - self.safety_checker = None - - # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload - # Add self.image_encoder, self.text_decoder to cpu_offloaded_models list - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae, self.image_encoder, self.text_decoder]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # We'll offload the last model manually. 
- self.final_offload_hook = hook - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents): - r""" - Infer the generation task ('mode') from the inputs to `__call__`. If the mode has been manually set, the set - mode will be used. - """ - prompt_available = (prompt is not None) or (prompt_embeds is not None) - image_available = image is not None - input_available = prompt_available or image_available - - prompt_latents_available = prompt_latents is not None - vae_latents_available = vae_latents is not None - clip_latents_available = clip_latents is not None - full_latents_available = latents is not None - image_latents_available = vae_latents_available and clip_latents_available - all_indv_latents_available = prompt_latents_available and image_latents_available - - if self.mode is not None: - # Preferentially use the mode set by the user - mode = self.mode - elif prompt_available: - mode = "text2img" - elif image_available: - mode = "img2text" - else: - # Neither prompt nor image supplied, infer based on availability of latents - if full_latents_available or all_indv_latents_available: - mode = "joint" - elif prompt_latents_available: - mode = "text" - elif image_latents_available: - mode = "img" - else: - # No inputs or latents available - mode = "joint" - - # Give warnings for ambiguous cases - if self.mode is None and prompt_available and image_available: - logger.warning( - f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually," - f" defaulting to mode '{mode}'." - ) - - if self.mode is None and not input_available: - if vae_latents_available != clip_latents_available: - # Exactly one of vae_latents and clip_latents is supplied - logger.warning( - f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none" - f" are expected to be supplied. Defaulting to mode '{mode}'." - ) - elif not prompt_latents_available and not vae_latents_available and not clip_latents_available: - # No inputs or latents supplied - logger.warning( - f"No inputs or latents have been supplied, and mode has not been manually set," - f" defaulting to mode '{mode}'." 
- ) - - return mode - - # Functions to manually set the mode - def set_text_mode(self): - r"""Manually set the generation mode to unconditional ("marginal") text generation.""" - self.mode = "text" - - def set_image_mode(self): - r"""Manually set the generation mode to unconditional ("marginal") image generation.""" - self.mode = "img" - - def set_text_to_image_mode(self): - r"""Manually set the generation mode to text-conditioned image generation.""" - self.mode = "text2img" - - def set_image_to_text_mode(self): - r"""Manually set the generation mode to image-conditioned text generation.""" - self.mode = "img2text" - - def set_joint_mode(self): - r"""Manually set the generation mode to unconditional joint image-text generation.""" - self.mode = "joint" - - def reset_mode(self): - r"""Removes a manually set mode; after calling this, the pipeline will infer the mode from inputs.""" - self.mode = None - - def _infer_batch_size( - self, - mode, - prompt, - prompt_embeds, - image, - num_images_per_prompt, - num_prompts_per_image, - latents, - prompt_latents, - vae_latents, - clip_latents, - ): - r"""Infers the batch size and multiplier depending on mode and supplied arguments to `__call__`.""" - if num_images_per_prompt is None: - num_images_per_prompt = 1 - if num_prompts_per_image is None: - num_prompts_per_image = 1 - - assert num_images_per_prompt > 0, "num_images_per_prompt must be a positive integer" - assert num_prompts_per_image > 0, "num_prompts_per_image must be a positive integer" - - if mode in ["text2img"]: - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - # Either prompt or prompt_embeds must be present for text2img. - batch_size = prompt_embeds.shape[0] - multiplier = num_images_per_prompt - elif mode in ["img2text"]: - if isinstance(image, PIL.Image.Image): - batch_size = 1 - else: - # Image must be available and type either PIL.Image.Image or torch.FloatTensor. - # Not currently supporting something like image_embeds. - batch_size = image.shape[0] - multiplier = num_prompts_per_image - elif mode in ["img"]: - if vae_latents is not None: - batch_size = vae_latents.shape[0] - elif clip_latents is not None: - batch_size = clip_latents.shape[0] - else: - batch_size = 1 - multiplier = num_images_per_prompt - elif mode in ["text"]: - if prompt_latents is not None: - batch_size = prompt_latents.shape[0] - else: - batch_size = 1 - multiplier = num_prompts_per_image - elif mode in ["joint"]: - if latents is not None: - batch_size = latents.shape[0] - elif prompt_latents is not None: - batch_size = prompt_latents.shape[0] - elif vae_latents is not None: - batch_size = vae_latents.shape[0] - elif clip_latents is not None: - batch_size = clip_latents.shape[0] - else: - batch_size = 1 - - if num_images_per_prompt == num_prompts_per_image: - multiplier = num_images_per_prompt - else: - multiplier = min(num_images_per_prompt, num_prompts_per_image) - logger.warning( - f"You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and" - f" num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to" - f" `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}." 
- ) - return batch_size, multiplier - - # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - # self.tokenizer => self.clip_tokenizer - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - """ - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - text_inputs = self.clip_tokenizer( - prompt, - padding="max_length", - max_length=self.clip_tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.clip_tokenizer.batch_decode( - untruncated_ids[:, self.clip_tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.clip_tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * 
batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = prompt_embeds.shape[1] - uncond_input = self.clip_tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.prepare_image_latents - # Add num_prompts_per_image argument, sample from autoencoder moment distribution - def encode_image_vae_latents( - self, - image, - batch_size, - num_prompts_per_image, - dtype, - device, - do_classifier_free_guidance, - generator=None, - ): - if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): - raise ValueError( - f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" - ) - - image = image.to(device=device, dtype=dtype) - - batch_size = batch_size * num_prompts_per_image - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if isinstance(generator, list): - image_latents = [ - self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i]) - * self.vae.config.scaling_factor - for i in range(batch_size) - ] - image_latents = torch.cat(image_latents, dim=0) - else: - image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) - # Scale image_latents by the VAE's scaling factor - image_latents = image_latents * self.vae.config.scaling_factor - - if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: - # expand image_latents for batch_size - deprecation_message = ( - f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" - " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" - " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" - " your script to pass as many initial images as text prompts to suppress this warning." - ) - deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) - additional_image_per_prompt = batch_size // image_latents.shape[0] - image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) - elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." - ) - else: - image_latents = torch.cat([image_latents], dim=0) - - if do_classifier_free_guidance: - uncond_image_latents = torch.zeros_like(image_latents) - image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) - - return image_latents - - def encode_image_clip_latents( - self, - image, - batch_size, - num_prompts_per_image, - dtype, - device, - generator=None, - ): - # Map image to CLIP embedding. - if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): - raise ValueError( - f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" - ) - - preprocessed_image = self.image_processor.preprocess( - image, - return_tensors="pt", - ) - preprocessed_image = preprocessed_image.to(device=device, dtype=dtype) - - batch_size = batch_size * num_prompts_per_image - if isinstance(generator, list): - image_latents = [ - self.image_encoder(**preprocessed_image[i : i + 1]).image_embeds for i in range(batch_size) - ] - image_latents = torch.cat(image_latents, dim=0) - else: - image_latents = self.image_encoder(**preprocessed_image).image_embeds - - if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: - # expand image_latents for batch_size - deprecation_message = ( - f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" - " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" - " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" - " your script to pass as many initial images as text prompts to suppress this warning." 
- ) - deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) - additional_image_per_prompt = batch_size // image_latents.shape[0] - image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) - elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." - ) - else: - image_latents = torch.cat([image_latents], dim=0) - - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - return image_latents - - # Note that the CLIP latents are not decoded for image generation. - # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - # Rename: decode_latents -> decode_image_latents - def decode_image_latents(self, latents): - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents, return_dict=False)[0] - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - def prepare_text_latents( - self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None - ): - # Prepare latents for the CLIP embedded prompt. - shape = (batch_size * num_images_per_prompt, seq_len, hidden_size) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - # latents is assumed to have shace (B, L, D) - latents = latents.repeat(num_images_per_prompt, 1, 1) - latents = latents.to(device=device, dtype=dtype) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - # Rename prepare_latents -> prepare_image_vae_latents and add num_prompts_per_image argument. - def prepare_image_vae_latents( - self, - batch_size, - num_prompts_per_image, - num_channels_latents, - height, - width, - dtype, - device, - generator, - latents=None, - ): - shape = ( - batch_size * num_prompts_per_image, - num_channels_latents, - height // self.vae_scale_factor, - width // self.vae_scale_factor, - ) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - # latents is assumed to have shape (B, C, H, W) - latents = latents.repeat(num_prompts_per_image, 1, 1, 1) - latents = latents.to(device=device, dtype=dtype) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - def prepare_image_clip_latents( - self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None - ): - # Prepare latents for the CLIP embedded image. - shape = (batch_size * num_prompts_per_image, 1, clip_img_dim) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - # latents is assumed to have shape (B, L, D) - latents = latents.repeat(num_prompts_per_image, 1, 1) - latents = latents.to(device=device, dtype=dtype) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - def _split(self, x, height, width): - r""" - Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim) into two tensors of shape (B, C, H, W) - and (B, 1, clip_img_dim) - """ - batch_size = x.shape[0] - latent_height = height // self.vae_scale_factor - latent_width = width // self.vae_scale_factor - img_vae_dim = self.num_channels_latents * latent_height * latent_width - - img_vae, img_clip = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1) - - img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) - img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) - return img_vae, img_clip - - def _combine(self, img_vae, img_clip): - r""" - Combines a latent iamge img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1, - clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim). - """ - img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) - img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) - return torch.concat([img_vae, img_clip], dim=-1) - - def _split_joint(self, x, height, width): - r""" - Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim + text_seq_len * text_dim] into (img_vae, - img_clip, text) where img_vae is of shape (B, C, H, W), img_clip is of shape (B, 1, clip_img_dim), and text is - of shape (B, text_seq_len, text_dim). 
- """ - batch_size = x.shape[0] - latent_height = height // self.vae_scale_factor - latent_width = width // self.vae_scale_factor - img_vae_dim = self.num_channels_latents * latent_height * latent_width - text_dim = self.text_encoder_seq_len * self.text_intermediate_dim - - img_vae, img_clip, text = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1) - - img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) - img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) - text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim)) - return img_vae, img_clip, text - - def _combine_joint(self, img_vae, img_clip, text): - r""" - Combines a latent image img_vae of shape (B, C, H, W), a CLIP-embedded image img_clip of shape (B, L_img, - clip_img_dim), and a text embedding text of shape (B, L_text, text_dim) into a single embedding x of shape (B, - C * H * W + L_img * clip_img_dim + L_text * text_dim). - """ - img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) - img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) - text = torch.reshape(text, (text.shape[0], -1)) - return torch.concat([img_vae, img_clip, text], dim=-1) - - def _get_noise_pred( - self, - mode, - latents, - t, - prompt_embeds, - img_vae, - img_clip, - max_timestep, - data_type, - guidance_scale, - generator, - device, - height, - width, - ): - r""" - Gets the noise prediction using the `unet` and performs classifier-free guidance, if necessary. - """ - if mode == "joint": - # Joint text-image generation - img_vae_latents, img_clip_latents, text_latents = self._split_joint(latents, height, width) - - img_vae_out, img_clip_out, text_out = self.unet( - img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type - ) - - x_out = self._combine_joint(img_vae_out, img_clip_out, text_out) - - if guidance_scale <= 1.0: - return x_out - - # Classifier-free guidance - img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) - img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) - text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) - - _, _, text_out_uncond = self.unet( - img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type - ) - - img_vae_out_uncond, img_clip_out_uncond, _ = self.unet( - img_vae_latents, - img_clip_latents, - text_T, - timestep_img=t, - timestep_text=max_timestep, - data_type=data_type, - ) - - x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) - - return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond - elif mode == "text2img": - # Text-conditioned image generation - img_vae_latents, img_clip_latents = self._split(latents, height, width) - - img_vae_out, img_clip_out, text_out = self.unet( - img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type - ) - - img_out = self._combine(img_vae_out, img_clip_out) - - if guidance_scale <= 1.0: - return img_out - - # Classifier-free guidance - text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) - - img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( - img_vae_latents, - img_clip_latents, - text_T, - timestep_img=t, - 
timestep_text=max_timestep, - data_type=data_type, - ) - - img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond) - - return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond - elif mode == "img2text": - # Image-conditioned text generation - img_vae_out, img_clip_out, text_out = self.unet( - img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type - ) - - if guidance_scale <= 1.0: - return text_out - - # Classifier-free guidance - img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) - img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) - - img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( - img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type - ) - - return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond - elif mode == "text": - # Unconditional ("marginal") text generation (no CFG) - img_vae_out, img_clip_out, text_out = self.unet( - img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type - ) - - return text_out - elif mode == "img": - # Unconditional ("marginal") image generation (no CFG) - img_vae_latents, img_clip_latents = self._split(latents, height, width) - - img_vae_out, img_clip_out, text_out = self.unet( - img_vae_latents, - img_clip_latents, - prompt_embeds, - timestep_img=t, - timestep_text=max_timestep, - data_type=data_type, - ) - - img_out = self._combine(img_vae_out, img_clip_out) - return img_out - - def check_latents_shape(self, latents_name, latents, expected_shape): - latents_shape = latents.shape - expected_num_dims = len(expected_shape) + 1 # expected dimensions plus the batch dimension - expected_shape_str = ", ".join(str(dim) for dim in expected_shape) - if len(latents_shape) != expected_num_dims: - raise ValueError( - f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" - f" {latents_shape} has {len(latents_shape)} dimensions." - ) - for i in range(1, expected_num_dims): - if latents_shape[i] != expected_shape[i - 1]: - raise ValueError( - f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" - f" {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}." - ) - - def check_inputs( - self, - mode, - prompt, - image, - height, - width, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - latents=None, - prompt_latents=None, - vae_latents=None, - clip_latents=None, - ): - # Check inputs before running the generative process. - if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: - raise ValueError( - f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}." - ) - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if mode == "text2img": - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - if mode == "img2text": - if image is None: - raise ValueError("`img2text` mode requires an image to be provided.") - - # Check provided latents - latent_height = height // self.vae_scale_factor - latent_width = width // self.vae_scale_factor - full_latents_available = latents is not None - prompt_latents_available = prompt_latents is not None - vae_latents_available = vae_latents is not None - clip_latents_available = clip_latents is not None - - if full_latents_available: - individual_latents_available = ( - prompt_latents is not None or vae_latents is not None or clip_latents is not None - ) - if individual_latents_available: - logger.warning( - "You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and" - " `clip_latents`. The value of `latents` will override the value of any individually supplied latents." - ) - # Check shape of full latents - img_vae_dim = self.num_channels_latents * latent_height * latent_width - text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size - latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim - latents_expected_shape = (latents_dim,) - self.check_latents_shape("latents", latents, latents_expected_shape) - - # Check individual latent shapes, if present - if prompt_latents_available: - prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size) - self.check_latents_shape("prompt_latents", prompt_latents, prompt_latents_expected_shape) - - if vae_latents_available: - vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width) - self.check_latents_shape("vae_latents", vae_latents, vae_latents_expected_shape) - - if clip_latents_available: - clip_latents_expected_shape = (1, self.image_encoder_projection_dim) - self.check_latents_shape("clip_latents", clip_latents, clip_latents_expected_shape) - - if mode in ["text2img", "img"] and vae_latents_available and clip_latents_available: - if vae_latents.shape[0] != clip_latents.shape[0]: - raise ValueError( - f"Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal:" - f" {vae_latents.shape[0]} != {clip_latents.shape[0]}." - ) - - if mode == "joint" and prompt_latents_available and vae_latents_available and clip_latents_available: - if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]: - raise ValueError( - f"All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch" - f" dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]}" - f" != {clip_latents.shape[0]}." 
- ) - - @torch.no_grad() - def __call__( - self, - prompt: Optional[Union[str, List[str]]] = None, - image: Optional[Union[torch.FloatTensor, PIL.Image.Image]] = None, - height: Optional[int] = None, - width: Optional[int] = None, - data_type: Optional[int] = 1, - num_inference_steps: int = 50, - guidance_scale: float = 8.0, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - num_prompts_per_image: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_latents: Optional[torch.FloatTensor] = None, - vae_latents: Optional[torch.FloatTensor] = None, - clip_latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. - Required for text-conditioned image generation (`text2img`) mode. - image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*): - `Image` or tensor representing an image batch. Required for image-conditioned text generation - (`img2text`) mode. - height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - data_type (`int`, *optional*, defaults to 1): - The data type (either 0 or 1). Only used if you are loading a checkpoint which supports a data type - embedding; this is added for compatibility with the - [UniDiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1) checkpoint. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 8.0): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). Used in - text-conditioned image generation (`text2img`) mode. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. Used in `text2img` (text-conditioned image generation) and - `img` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are - supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. - num_prompts_per_image (`int`, *optional*, defaults to 1): - The number of prompts to generate per image. Used in `img2text` (image-conditioned text generation) and - `text` mode. 
If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are - supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for joint - image-text generation. Can be used to tweak the same generation with different prompts. If not - provided, a latents tensor is generated by sampling using the supplied random `generator`. This assumes - a full set of VAE, CLIP, and text latents, if supplied, overrides the value of `prompt_latents`, - `vae_latents`, and `clip_latents`. - prompt_latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for text - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - vae_latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - clip_latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not - provided, text embeddings are generated from the `prompt` input argument. Used in text-conditioned - image generation (`text2img`) mode. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If - not provided, `negative_prompt_embeds` are be generated from the `negative_prompt` input argument. Used - in text-conditioned image generation (`text2img`) mode. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.ImageTextPipelineOutput`] instead of a plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. 
- - Returns: - [`~pipelines.unidiffuser.ImageTextPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.unidiffuser.ImageTextPipelineOutput`] is returned, otherwise a - `tuple` is returned where the first element is a list with the generated images and the second element - is a list of generated texts. - """ - - # 0. Default height and width to unet - height = height or self.unet_resolution * self.vae_scale_factor - width = width or self.unet_resolution * self.vae_scale_factor - - # 1. Check inputs - # Recalculate mode for each call to the pipeline. - mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents) - self.check_inputs( - mode, - prompt, - image, - height, - width, - callback_steps, - negative_prompt, - prompt_embeds, - negative_prompt_embeds, - latents, - prompt_latents, - vae_latents, - clip_latents, - ) - - # 2. Define call parameters - batch_size, multiplier = self._infer_batch_size( - mode, - prompt, - prompt_embeds, - image, - num_images_per_prompt, - num_prompts_per_image, - latents, - prompt_latents, - vae_latents, - clip_latents, - ) - device = self._execution_device - reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != "text2img" - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - # Note that this differs from the formulation in the unidiffusers paper! - # do_classifier_free_guidance = guidance_scale > 1.0 - - # check if scheduler is in sigmas space - # scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas") - - # 3. Encode input prompt, if available; otherwise prepare text latents - if latents is not None: - # Overwrite individual latents - vae_latents, clip_latents, prompt_latents = self._split_joint(latents, height, width) - - if mode in ["text2img"]: - # 3.1. Encode input prompt, if available - assert prompt is not None or prompt_embeds is not None - prompt_embeds = self._encode_prompt( - prompt=prompt, - device=device, - num_images_per_prompt=multiplier, - do_classifier_free_guidance=False, # don't support standard classifier-free guidance for now - negative_prompt=negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - ) - else: - # 3.2. Prepare text latent variables, if input not available - prompt_embeds = self.prepare_text_latents( - batch_size=batch_size, - num_images_per_prompt=multiplier, - seq_len=self.text_encoder_seq_len, - hidden_size=self.text_encoder_hidden_size, - dtype=self.text_encoder.dtype, # Should work with both full precision and mixed precision - device=device, - generator=generator, - latents=prompt_latents, - ) - - if reduce_text_emb_dim: - prompt_embeds = self.text_decoder.encode(prompt_embeds) - - # 4. Encode image, if available; otherwise prepare image latents - if mode in ["img2text"]: - # 4.1. 
Encode images, if available - assert image is not None, "`img2text` requires a conditioning image" - # Encode image using VAE - image_vae = preprocess(image) - height, width = image_vae.shape[-2:] - image_vae_latents = self.encode_image_vae_latents( - image=image_vae, - batch_size=batch_size, - num_prompts_per_image=multiplier, - dtype=prompt_embeds.dtype, - device=device, - do_classifier_free_guidance=False, # Copied from InstructPix2Pix, don't use their version of CFG - generator=generator, - ) - - # Encode image using CLIP - image_clip_latents = self.encode_image_clip_latents( - image=image, - batch_size=batch_size, - num_prompts_per_image=multiplier, - dtype=prompt_embeds.dtype, - device=device, - generator=generator, - ) - # (batch_size, clip_hidden_size) => (batch_size, 1, clip_hidden_size) - image_clip_latents = image_clip_latents.unsqueeze(1) - else: - # 4.2. Prepare image latent variables, if input not available - # Prepare image VAE latents in latent space - image_vae_latents = self.prepare_image_vae_latents( - batch_size=batch_size, - num_prompts_per_image=multiplier, - num_channels_latents=self.num_channels_latents, - height=height, - width=width, - dtype=prompt_embeds.dtype, - device=device, - generator=generator, - latents=vae_latents, - ) - - # Prepare image CLIP latents - image_clip_latents = self.prepare_image_clip_latents( - batch_size=batch_size, - num_prompts_per_image=multiplier, - clip_img_dim=self.image_encoder_projection_dim, - dtype=prompt_embeds.dtype, - device=device, - generator=generator, - latents=clip_latents, - ) - - # 5. Set timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - # max_timestep = timesteps[0] - max_timestep = self.scheduler.config.num_train_timesteps - - # 6. Prepare latent variables - if mode == "joint": - latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds) - elif mode in ["text2img", "img"]: - latents = self._combine(image_vae_latents, image_clip_latents) - elif mode in ["img2text", "text"]: - latents = prompt_embeds - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - logger.debug(f"Scheduler extra step kwargs: {extra_step_kwargs}") - - # 8. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # predict the noise residual - # Also applies classifier-free guidance as described in the UniDiffuser paper - noise_pred = self._get_noise_pred( - mode, - latents, - t, - prompt_embeds, - image_vae_latents, - image_clip_latents, - max_timestep, - data_type, - guidance_scale, - generator, - device, - height, - width, - ) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # 9. 
Post-processing - gen_image = None - gen_text = None - if mode == "joint": - image_vae_latents, image_clip_latents, text_latents = self._split_joint(latents, height, width) - - # Map latent VAE image back to pixel space - gen_image = self.decode_image_latents(image_vae_latents) - - # Generate text using the text decoder - output_token_list, seq_lengths = self.text_decoder.generate_captions( - text_latents, self.text_tokenizer.eos_token_id, device=device - ) - output_list = output_token_list.cpu().numpy() - gen_text = [ - self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True) - for output, length in zip(output_list, seq_lengths) - ] - elif mode in ["text2img", "img"]: - image_vae_latents, image_clip_latents = self._split(latents, height, width) - gen_image = self.decode_image_latents(image_vae_latents) - elif mode in ["img2text", "text"]: - text_latents = latents - output_token_list, seq_lengths = self.text_decoder.generate_captions( - text_latents, self.text_tokenizer.eos_token_id, device=device - ) - output_list = output_token_list.cpu().numpy() - gen_text = [ - self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True) - for output, length in zip(output_list, seq_lengths) - ] - - # 10. Convert to PIL - if output_type == "pil" and gen_image is not None: - gen_image = self.numpy_to_pil(gen_image) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (gen_image, gen_text) - - return ImageTextPipelineOutput(images=gen_image, text=gen_text) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py b/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py deleted file mode 100644 index cf315a4f0e6f397768572c590a634cc1b9d298a9..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=60), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/Benson/text-generation/Examples/Air Game Apk.md b/spaces/Benson/text-generation/Examples/Air Game Apk.md deleted file mode 100644 index d813f26b6228a6f794c7e7563bfd3787e7839f82..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Air Game Apk.md +++ /dev/null @@ -1,120 +0,0 @@ - -

Air Game APK: How to Play Multiplayer Games on Your Android Device

Do you love playing multiplayer games with your friends and family? Do you want to enjoy the thrill of arcade games, racing games, party games, and more on your Android device? If so, you should try Air Game APK, a collection of two great Android games that let you play multiplayer games on your device. In this article, we will tell you what Air Game APK is, how to download and install it, and how to play multiplayer games with it.

    -

    air game apk


Download File: https://bltlly.com/2v6KvG



    -

What is Air Game APK?

Air Game APK is a bundle of two Android games that let you play multiplayer games on your device. The two games are:

AirConsole: a multiplayer video game console for Android

AirConsole is a multiplayer video game console that lets you play games on your Android TV, Amazon Fire TV, tablet, or computer as the console, and use your smartphones as controllers. AirConsole is fast, fun, and easy to get started with. You can choose from hundreds of games across different genres, such as arcade, racing, party, trivia, sports, and more. You can also create your own games using AirConsole's developer tools. AirConsole is a great way to enjoy games with your friends and family without buying expensive consoles or controllers.

1945 Air Force: a classic plane shooter game for Android

    - -

How to Download and Install Air Game APK

To download and install Air Game APK, you need to follow these steps:

Steps to download and install the AirConsole APK

1. Go to the official AirConsole website at https://www.airconsole.com/ or search for "AirConsole" on the Google Play Store.
2. Click the "Download" or "Install" button to download the AirConsole APK file.
3. Once the download is complete, open the file manager app on your device and locate the downloaded file.
4. Tap the file and allow installation from unknown sources if prompted.
5. Follow the on-screen instructions to complete the installation.
6. Launch the AirConsole app and enjoy playing multiplayer games on your device. (A command-line alternative for sideloading is sketched after this list.)
    12. -
    -
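If you would rather sideload the file from a computer, the rough sketch below shows the same flow driven from Python with adb (Android Debug Bridge). It assumes adb is installed and USB debugging is already enabled on the phone; "airconsole.apk" is only a placeholder name for whatever file you actually downloaded.

```python
# Minimal sketch: sideload a downloaded APK from a computer with adb.
# Assumes adb is on PATH and USB debugging is enabled on the device.
import subprocess

def sideload(apk_path: str) -> None:
    # Show connected devices so you can confirm the phone is visible to adb.
    subprocess.run(["adb", "devices"], check=True)
    # "-r" replaces the app if an older build is already installed.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload("airconsole.apk")  # placeholder file name
```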

Steps to download and install the 1945 Air Force APK

1. Go to the official 1945 Air Force website at https://www.onesoft.com.vn/ or search for "1945 Air Force" on the Google Play Store.
2. Click the "Download" or "Install" button to download the 1945 Air Force APK file.
3. Once the download is complete, open the file manager app on your device and locate the downloaded file.
4. Tap the file and allow installation from unknown sources if prompted.
5. Follow the on-screen instructions to complete the installation.
6. Launch the 1945 Air Force app and enjoy playing multiplayer games on your device.
    12. -
    -

How to Play Multiplayer Games on Air Game APK

Now that you have downloaded and installed Air Game APK, you can start playing multiplayer games on your device. Here are some tips on how to play multiplayer games on the AirConsole APK and the 1945 Air Force APK.

How to play multiplayer games on the AirConsole APK

To play multiplayer games on the AirConsole APK, you need to do the following:

Connect your Android device to a screen

Use your smartphone as a controller

You can use your smartphone as a controller to play games on AirConsole. You can use your smartphone's touchscreen, gyroscope, or microphone as input methods. To use your smartphone as a controller, follow these steps:

Choose from hundreds of games across different genres

You can choose from hundreds of games across different genres to play on AirConsole. You can find games for all ages and tastes, such as arcade, racing, party, trivia, sports, and more. To choose a game to play on AirConsole, follow these steps:

    - -

How to play multiplayer games on the 1945 Air Force APK

To play multiplayer games on the 1945 Air Force APK, you need to do the following:

Choose your fighter plane and customize it

You can choose from over 200 fighter planes from different countries and customize them with various weapons and equipment. To choose and customize your fighter plane, follow these steps:

Join a squadron and fight enemies

You can join a squadron and fight enemies solo or as a team. To join a squadron and fight enemies, follow these steps:

Collect coins and rewards to upgrade your plane

You can collect coins and rewards to upgrade your plane and unlock new features. To collect coins and rewards for upgrading your plane, follow these steps:

    - -

Conclusion

Air Game APK is a collection of two great Android games that let you play multiplayer games on your device. AirConsole is a multiplayer video game console that lets you play games on your Android TV, Amazon Fire TV, tablet, or computer as the console, and use your smartphones as controllers. 1945 Air Force is a classic plane shooter game that takes you back to the World War II era. You can choose from over 200 planes from different countries and customize them with various weapons and equipment. You can also join a squadron and fight enemies solo or as a team. Air Game APK is a great way to enjoy games with your friends and family without buying expensive consoles or controllers. If you want to play multiplayer games on your Android device, you should download and install Air Game APK today.

    -

Frequently Asked Questions

Here are some frequently asked questions about Air Game APK:

1. What are the requirements to play Air Game APK?

2. Is Air Game APK safe and legal?

  Yes, Air Game APK is safe and legal. The games are developed by reputable companies and are verified by the Google Play Store. The games do not contain viruses, malware, or illegal content. However, you should always download the games from the official sources and not from third-party websites.

3. How much does Air Game APK cost?

  Air Game APK is free to download and play. However, some games may offer in-app purchases or ads that you can buy or watch to support the developers. You can also spend gems or real money to buy premium items for your plane in 1945 Air Force.

4. How many players can play Air Game APK?

  The number of players who can play Air Game APK depends on the game and mode you choose. For AirConsole, you can play with up to 16 players on one screen or up to 32 players online. For 1945 Air Force, you can play with up to 4 players in team mode or up to 8 players in event mode.

5. What are some of the best games to play on Air Game APK?

  Some of the best games to play on Air Game APK are:

  • AirConsole: Cards and Humanity, Racing Wars, Silly World Series, Tower of Babel, The Neighborhood, etc.
  • 1945 Air Force: Sky Force Reloaded, Strikers 1945, Raiden Legacy, Sky Gamblers: Storm Raiders, etc.
      • -
      -

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Apk Cinco Noches En Freddy Y 39 S 2.md b/spaces/Benson/text-generation/Examples/Apk Cinco Noches En Freddy Y 39 S 2.md deleted file mode 100644 index 3926284142e077f2c00871b467eca08aa71df3c4..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Apk Cinco Noches En Freddy Y 39 S 2.md +++ /dev/null @@ -1,100 +0,0 @@ - -

Five Nights at Freddy's 2 APK: A Horror Game for Android

Do you like horror games? Do you enjoy being scared by creepy animatronics? Do you want to experience the thrill of surviving five nights as a night guard in a haunted pizzeria? If you answered yes to any of these questions, then you should try the Five Nights at Freddy's 2 APK, a horror game for Android devices that will keep you on the edge of your seat.

    -

Introduction

In this article, we will tell you everything you need to know about the Five Nights at Freddy's 2 APK, including what it is, how to download and install it, and how to play it. We will also give you some tips and tricks to help you survive the night and avoid being killed by the mechanical monsters. But before we get into that, let's first explain what Five Nights at Freddy's 2 is and what an APK file is.

    -

five nights at freddy's 2 apk

DOWNLOAD: https://bltlly.com/2v6MTn



    -

What is Five Nights at Freddy's 2?

Five Nights at Freddy's 2 is a horror game developed by Scott Cawthon and released in 2014. It is the sequel to the original Five Nights at Freddy's, which was also released in 2014. The game is set in a fictional pizzeria called Freddy Fazbear's Pizza, where the main attraction is a group of animatronic characters that entertain children during the day. At night, however, these animatronics turn hostile and try to kill anyone who stays in the building.

The game puts you in the role of a night guard who has to work five nights (from 12 AM to 6 AM) at the pizzeria. Your only tools are a security camera system, a flashlight, and a mask that can fool some of the animatronics. You have to monitor the cameras and watch for any movement or signs of danger. If you see an animatronic approaching your office, you have to close the door, turn on the light, or put on the mask to deter it. If you don't, it will jumpscare you and end your game.

    - -

What is an APK file and why do you need it?

An APK file is a file format used to distribute and install applications on Android devices. APK stands for Android Package Kit, and it contains all the files and data an app needs to run on your device. You can download APK files from various online sources, such as official app stores, third-party websites, or file-sharing platforms.

You might need an APK file for several reasons. For example, you may want to install an app that is not available in your region or for your device model. You may also want to install an older version of an app that has been updated or removed from the app store. Or you may simply want to try a new or unreleased app that has not been officially launched yet.

However, downloading and installing APK files also comes with some risks. For example, you may run into malware or viruses that can damage your device or steal your personal information. You may also violate some of the terms of service or privacy policies of the app developer or the app store. You may also run into compatibility or performance problems with your device or the app itself. Therefore, you should always be careful when downloading and installing APK files, and only do so from trusted, reputable sources. (A short look inside an APK file follows below.)
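Because an APK is simply a ZIP archive with a fixed layout, you can look inside one to see the files and data it bundles. The snippet below is only a small illustration; "fnaf2.apk" is a placeholder for any APK file you already have on disk.

```python
# Minimal sketch: list what an APK bundles, since an APK is a ZIP archive.
import zipfile

# "fnaf2.apk" is a placeholder; point this at any APK you have downloaded.
with zipfile.ZipFile("fnaf2.apk") as apk:
    for name in apk.namelist()[:20]:  # print only the first few entries
        print(name)  # e.g. AndroidManifest.xml, classes.dex, res/..., assets/...
```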

    -

    -

How to Download and Install the Five Nights at Freddy's 2 APK

If you want to play the Five Nights at Freddy's 2 APK on your Android device, you will need to download and install the APK file for the game. These are the steps you need to follow:

Step 1: Enable unknown sources on your device

Before you can install any APK file on your device, you need to enable the option that allows installation from unknown sources. This option is usually disabled by default for security reasons, but you can easily turn it on by following these steps:

Once you have enabled unknown sources, you can move on to the next step.

    -

Step 2: Download the APK file from a trusted source

The next step is to download the APK file for Five Nights at Freddy's 2 from a trusted, reliable source. You can use any web browser on your device to do this, but make sure you are using a secure and fast connection. These are some of the sources we recommend:

Once you have chosen a source, follow these steps to download the APK file:

Once you have downloaded the APK file, you can move on to the next step. (A small sketch for checking the downloaded file's integrity follows below.)
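If the source you downloaded from publishes a checksum for the file, you can verify the download before installing it. This is a minimal sketch: both the file name and the expected SHA-256 value are placeholders you would replace with your own.

```python
# Minimal sketch: verify a downloaded APK against a published SHA-256 checksum.
# Both the file name and EXPECTED_SHA256 are placeholders for your own values.
import hashlib

EXPECTED_SHA256 = "0000000000000000000000000000000000000000000000000000000000000000"

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in 1 MiB chunks so large files do not need to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("fnaf2.apk")
print("match" if digest == EXPECTED_SHA256 else f"mismatch: {digest}")
```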

Step 3: Install the APK file on your device

The final step is to install the APK file on your device. This is a simple and quick process, but you should be careful and follow the instructions closely. These are the steps you need to follow:

Congratulations! You have successfully installed the Five Nights at Freddy's 2 APK on your device. Now you can enjoy playing the game and have fun.

    -

How to Play the Five Nights at Freddy's 2 APK

Now that you have installed the Five Nights at Freddy's 2 APK on your device, you may be wondering how to play it. Don't worry, we have you covered. In this section, we will explain the gameplay and controls, the characters and locations, and some tips and tricks to help you survive the night and avoid being killed by the animatronics.

    -

The gameplay and controls

    - -

The controls of the Five Nights at Freddy's 2 APK are simple and intuitive. You can swipe across the screen with your finger to look around your office or switch between cameras. You can also tap the screen to interact with various objects, such as the doors, lights, or mask. You can also use your device's volume buttons to adjust the game's sound level.

    -

The characters and locations

The characters in the Five Nights at Freddy's 2 APK are 11 animatronics with different appearances and behaviors. Some of them are new versions of the characters from the first game, while others are old, damaged models that have been put into storage. Here is a brief description of each character and its location:

| Name | Description | Location |
| --- | --- | --- |
| Freddy Fazbear | The main mascot of Freddy Fazbear's Pizza. A brown bear with a black hat and bow tie. He is usually inactive during the first nights, but becomes more active and aggressive as the nights go on. He can be fooled by the mask, but he can also sneak into your office without being seen on the cameras. | Starts on the Show Stage with Bonnie and Chica, then moves to Party Room 3, Party Room 4, the Main Hall, or the Right Air Vent. |
| Bonnie | A blue rabbit with a red bow tie. One of Freddy's friends and bandmates. He is very active and aggressive on every night. He can be fooled by the mask, but he can also enter your office through either of the air vents. | Starts on the Show Stage with Freddy and Chica, then moves to Party Room 1, Party Room 2, the Left Air Vent, or the Right Air Vent. |
| Foxy | A red fox with an eye patch and a hook for a hand. A pirate-themed animatronic that was out of order in the first game. He is still broken and damaged, but he can still move and attack. He cannot be fooled by the mask, but he can be stopped with the flashlight. He is very fast and unpredictable, and can appear at any time. | Starts in Parts/Service with the old animatronics, then moves to the Main Hall, Party Room 3, or The Office. |
| Mangle | A white and pink fox that was originally a new version of Foxy, but was torn apart by children and became a mangled mess of wires and parts. It is now a take-apart-and-put-back-together attraction that can crawl along the ceiling and walls. It cannot be fooled by the mask, but it can be stopped with the flashlight. It is also very fast and unpredictable, and can appear at any time. | Starts in Kid's Cove, then moves to the Main Hall, Party Room 9, Party Room 4, or the Right Air Vent. |
| Balloon Boy | A small human animatronic wearing a red-and-blue striped shirt, blue pants, and a propeller hat, holding a balloon and a sign that says "Balloons!". A friendly, cheerful character that does not attack you directly, but he can disable your flashlight and leave you vulnerable to Foxy or Mangle. He can be fooled by the mask, but he can also enter your office through either of the air vents. | Starts in the Game Area, then moves to the Left Air Vent or the Right Air Vent. |
| Toy Freddy | A newer, sleeker, more modern version of Freddy Fazbear. A brown bear with blue eyes, a black hat, a bow tie, and rosy cheeks. He is less active and aggressive than the original Freddy, but he can still be a threat. He can be fooled by the mask, but he can also enter your office through the Main Hall. | Starts on the Show Stage with Toy Bonnie and Toy Chica, then moves to the Game Area, Party Room 3, the Main Hall, or The Office. |
| Toy Bonnie | A newer, sleeker, more modern version of Bonnie. A blue rabbit with green eyes, a red bow tie, and rosy cheeks. He is more active and aggressive than the original Bonnie, and he can move faster. He can be fooled by the mask, but he can also enter your office through the Left Air Vent. | Starts on the Show Stage with Toy Freddy and Toy Chica, then moves to the Game Area, Party Room 2, the Left Air Vent, or The Office. |
| Toy Chica | A newer, sleeker, more modern version of Chica. A yellow chicken with purple eyes, a pink bib that says "Let's Party!", and rosy cheeks. She carries a cupcake on a plate as an accessory. She is more active and aggressive than the original Chica, and she can move faster. She can be fooled by the mask, but she can also enter your office through the Right Air Vent. | Starts on the Show Stage with Toy Freddy and Toy Bonnie, then moves to the Game Area, Party Room 4, the Right Air Vent, or The Office. |
| Golden Freddy | A golden version of Freddy Fazbear that appears as a hallucination or a ghost. An Easter-egg character with no fixed location or pattern. He can randomly appear on your screen or in your office, causing your game to crash or end. He cannot be fooled by the mask or stopped by anything. He is very rare and hard to find. | Has no set location. He can appear anywhere at any time. |

The tips and tricks for the Five Nights at Freddy's 2 APK are strategies and pointers that can help you survive the night and avoid being killed by the animatronics. Here are some of them:

    - -

Conclusion

    - -

If you want to play the Five Nights at Freddy's 2 APK on your device, you have to download and install the APK file from a trusted source. You also need to enable unknown sources on your device and follow the instructions carefully. Once the game is installed, you can enjoy playing it and have fun.

    -

Be warned, though: this game is not for the faint of heart or the easily scared. It is very challenging and frightening, and it will make you scream and jump out of your seat. If you are looking for a thrill and a challenge, then this game is for you. But if you are looking for a relaxing, peaceful game, then it is not.

    -

So, are you ready to face your fears and play the Five Nights at Freddy's 2 APK? If you are, then download the game now and start playing. If you are not, then maybe you should look for another game. The choice is yours.

    -

Thank you for reading this article. We hope you found it useful and informative. If you have any questions or comments, please feel free to leave them below. We would love to hear from you.

    -

Frequently Asked Questions

    -

Here are some frequently asked questions about the Five Nights at Freddy's 2 APK:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Amor Enredo Mod Apk.md b/spaces/Benson/text-generation/Examples/Descargar Amor Enredo Mod Apk.md deleted file mode 100644 index 1560c46683b631641ad9c766e2ce8bd72eba4f99..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Amor Enredo Mod Apk.md +++ /dev/null @@ -1,50 +0,0 @@ - -

    Download Love Tangle Mod Apk: A Romantic Adventure Game

    -

    Do you like romance games with beautiful graphics and engaging stories? If so, you should try Love Tangle, a popular adventure game developed by Shall we date?. In this game you can create your own avatar, choose your love interest from a variety of characters, and enjoy the exciting episodes and endings. But what if you want to unlock all of the game's features and content without spending money or waiting for hours? Well, there is a solution for that: download the Love Tangle mod apk!

    -

    What is Love Tangle?

    -

    Love Tangle is a free adventure game that lets you experience a romantic story in a luxury apartment building. You are a new resident in the building, where you meet many attractive and mysterious characters. You can interact with them, flirt with them, and fall in love with them. You can also customize your avatar with various outfits, accessories, and hairstyles. The game has stunning graphics, voice acting, and sound effects that make you feel like you are part of the story.

    -

    download love tangle mod apk


    Download File: https://bltlly.com/2v6LbA



    -

    Features of Love Tangle

    -

    Customize your avatar

    -

    You can create your own avatar by choosing from different facial features, skin tones, hair colors, and styles. You can also dress your avatar up in clothes, shoes, jewelry, and accessories. You can change your look whenever you want and impress your love interest with your style.

    -

    Choose your love interest

    -

    You can choose from a variety of characters to date in Love Tangle. Each character has their own personality, background, and story. You can get to know them better by talking to them, spending time with them, and making choices that affect your relationship. You can also switch between characters and explore different routes and endings.

    -

    Enjoy the story and graphics

    - -

    Why download the Love Tangle mod apk?

    -

    Love Tangle is a fun and exciting game, but it also has some limitations that can get in the way of your enjoyment. For example, you need to spend coins or tickets to unlock new episodes or endings. You also need to watch ads or wait for hours to recharge your energy or tickets. These restrictions can be frustrating and annoying, especially if you want to progress faster or see all of the game's content.

    -

    Benefits of the Love Tangle mod apk

    -

    Unlock all episodes and endings

    -

    With the Love Tangle mod apk, you can unlock all of the game's episodes and endings without spending coins or tickets. You can access any episode or ending you want, whenever you want. You can also see every possible outcome of your choices and discover all of the game's secrets and surprises.

    -

    Get unlimited coins and tickets

    -

    With the Love Tangle mod apk, you can get unlimited coins and tickets that can be used to buy items or access features in the game. You can buy any outfit or accessory you want for your avatar or your love interest. You can also use tickets to skip ads or speed up the energy recharge time.

    -

    Remove ads and enjoy the game

    -

    With the Love Tangle mod apk, you can remove all the ads that interrupt your gameplay or make you wait for hours. You can enjoy the game without distractions or delays. You can also save data and battery by not loading or watching ads.

    -

    How to download and install the Love Tangle mod apk?

    -

    If you are interested in downloading and installing the Love Tangle mod apk, you need to follow a few simple steps. Here are the steps to follow:

    -

    Steps to download and install the Love Tangle mod apk

    -

    Download the mod apk file from a trusted source

    - -

    Enable unknown sources on your device

    -

    The second step is to enable unknown sources on your device. This is necessary because the mod apk file does not come from the official Google Play Store and your device may block its installation. To enable unknown sources, go to your device settings, then Security, then Unknown Sources, and turn it on. This will allow you to install apps from sources other than the Google Play Store.

    -

    -

    Install the mod apk file and launch the game

    -

    The third and final step is to install the mod apk file and launch the game. To install it, locate the file in your device's file manager or downloads folder and tap it. Follow the on-screen instructions and wait for the installation to finish. Once it is done, you can launch the game from your app drawer or home screen. Enjoy playing Love Tangle with all the mod features!

    -

    Conclusion

    -

    Love Tangle is a romantic adventure game that lets you create your own avatar, choose your love interest, and enjoy the story and graphics. However, if you want to unlock all of the game's features and content without spending money or waiting for hours, you should download the Love Tangle mod apk. With it, you can unlock all episodes and endings, get unlimited coins and tickets, remove ads, and enjoy the game. You can download and install the Love Tangle mod apk by following a few simple steps. Download it today and have fun!

    -

    Frequently Asked Questions

    -

    Here are some frequently asked questions about the Love Tangle mod apk:

    -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/isoparser.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/isoparser.py deleted file mode 100644 index 5d7bee38006d4e510b841d84df0322dee024b77c..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/isoparser.py +++ /dev/null @@ -1,416 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a parser for ISO-8601 strings - -It is intended to support all valid date, time and datetime formats per the -ISO-8601 specification. - -..versionadded:: 2.7.0 -""" -from datetime import datetime, timedelta, time, date -import calendar -from dateutil import tz - -from functools import wraps - -import re -import six - -__all__ = ["isoparse", "isoparser"] - - -def _takes_ascii(f): - @wraps(f) - def func(self, str_in, *args, **kwargs): - # If it's a stream, read the whole thing - str_in = getattr(str_in, 'read', lambda: str_in)() - - # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII - if isinstance(str_in, six.text_type): - # ASCII is the same in UTF-8 - try: - str_in = str_in.encode('ascii') - except UnicodeEncodeError as e: - msg = 'ISO-8601 strings should contain only ASCII characters' - six.raise_from(ValueError(msg), e) - - return f(self, str_in, *args, **kwargs) - - return func - - -class isoparser(object): - def __init__(self, sep=None): - """ - :param sep: - A single character that separates date and time portions. If - ``None``, the parser will accept any single character. - For strict ISO-8601 adherence, pass ``'T'``. - """ - if sep is not None: - if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): - raise ValueError('Separator must be a single, non-numeric ' + - 'ASCII character') - - sep = sep.encode('ascii') - - self._sep = sep - - @_takes_ascii - def isoparse(self, dt_str): - """ - Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. - - An ISO-8601 datetime string consists of a date portion, followed - optionally by a time portion - the date and time portions are separated - by a single character separator, which is ``T`` in the official - standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be - combined with a time portion. - - Supported date formats are: - - Common: - - - ``YYYY`` - - ``YYYY-MM`` or ``YYYYMM`` - - ``YYYY-MM-DD`` or ``YYYYMMDD`` - - Uncommon: - - - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) - - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day - - The ISO week and day numbering follows the same logic as - :func:`datetime.date.isocalendar`. - - Supported time formats are: - - - ``hh`` - - ``hh:mm`` or ``hhmm`` - - ``hh:mm:ss`` or ``hhmmss`` - - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) - - Midnight is a special case for `hh`, as the standard supports both - 00:00 and 24:00 as a representation. The decimal separator can be - either a dot or a comma. - - - .. caution:: - - Support for fractional components other than seconds is part of the - ISO-8601 standard, but is not currently implemented in this parser. - - Supported time zone offset formats are: - - - `Z` (UTC) - - `±HH:MM` - - `±HHMM` - - `±HH` - - Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, - with the exception of UTC, which will be represented as - :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such - as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. 
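    Example (a minimal usage sketch, not part of the original docstring; it
    assumes the module-level ``isoparse`` helper that this module re-exports
    as ``dateutil.parser.isoparse``, and the behaviour documented above)::

        >>> from dateutil.parser import isoparse
        >>> isoparse('2018-04-29T17:45:25Z')  # 'Z' is returned as tz.UTC
        datetime.datetime(2018, 4, 29, 17, 45, 25, tzinfo=tzutc())
        >>> isoparse('2018-04')  # unspecified components default to their lowest value
        datetime.datetime(2018, 4, 1, 0, 0)
        >>> isoparse('2018-W17-7')  # ISO week-and-day form
        datetime.datetime(2018, 4, 29, 0, 0)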
- - :param dt_str: - A string or stream containing only an ISO-8601 datetime string - - :return: - Returns a :class:`datetime.datetime` representing the string. - Unspecified components default to their lowest value. - - .. warning:: - - As of version 2.7.0, the strictness of the parser should not be - considered a stable part of the contract. Any valid ISO-8601 string - that parses correctly with the default settings will continue to - parse correctly in future versions, but invalid strings that - currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not - guaranteed to continue failing in future versions if they encode - a valid date. - - .. versionadded:: 2.7.0 - """ - components, pos = self._parse_isodate(dt_str) - - if len(dt_str) > pos: - if self._sep is None or dt_str[pos:pos + 1] == self._sep: - components += self._parse_isotime(dt_str[pos + 1:]) - else: - raise ValueError('String contains unknown ISO components') - - if len(components) > 3 and components[3] == 24: - components[3] = 0 - return datetime(*components) + timedelta(days=1) - - return datetime(*components) - - @_takes_ascii - def parse_isodate(self, datestr): - """ - Parse the date portion of an ISO string. - - :param datestr: - The string portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.date` object - """ - components, pos = self._parse_isodate(datestr) - if pos < len(datestr): - raise ValueError('String contains unknown ISO ' + - 'components: {!r}'.format(datestr.decode('ascii'))) - return date(*components) - - @_takes_ascii - def parse_isotime(self, timestr): - """ - Parse the time portion of an ISO string. - - :param timestr: - The time portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.time` object - """ - components = self._parse_isotime(timestr) - if components[0] == 24: - components[0] = 0 - return time(*components) - - @_takes_ascii - def parse_tzstr(self, tzstr, zero_as_utc=True): - """ - Parse a valid ISO time zone string. - - See :func:`isoparser.isoparse` for details on supported formats. - - :param tzstr: - A string representing an ISO time zone offset - - :param zero_as_utc: - Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones - - :return: - Returns :class:`dateutil.tz.tzoffset` for offsets and - :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is - specified) offsets equivalent to UTC. 
- """ - return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) - - # Constants - _DATE_SEP = b'-' - _TIME_SEP = b':' - _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') - - def _parse_isodate(self, dt_str): - try: - return self._parse_isodate_common(dt_str) - except ValueError: - return self._parse_isodate_uncommon(dt_str) - - def _parse_isodate_common(self, dt_str): - len_str = len(dt_str) - components = [1, 1, 1] - - if len_str < 4: - raise ValueError('ISO string too short') - - # Year - components[0] = int(dt_str[0:4]) - pos = 4 - if pos >= len_str: - return components, pos - - has_sep = dt_str[pos:pos + 1] == self._DATE_SEP - if has_sep: - pos += 1 - - # Month - if len_str - pos < 2: - raise ValueError('Invalid common month') - - components[1] = int(dt_str[pos:pos + 2]) - pos += 2 - - if pos >= len_str: - if has_sep: - return components, pos - else: - raise ValueError('Invalid ISO format') - - if has_sep: - if dt_str[pos:pos + 1] != self._DATE_SEP: - raise ValueError('Invalid separator in ISO string') - pos += 1 - - # Day - if len_str - pos < 2: - raise ValueError('Invalid common day') - components[2] = int(dt_str[pos:pos + 2]) - return components, pos + 2 - - def _parse_isodate_uncommon(self, dt_str): - if len(dt_str) < 4: - raise ValueError('ISO string too short') - - # All ISO formats start with the year - year = int(dt_str[0:4]) - - has_sep = dt_str[4:5] == self._DATE_SEP - - pos = 4 + has_sep # Skip '-' if it's there - if dt_str[pos:pos + 1] == b'W': - # YYYY-?Www-?D? - pos += 1 - weekno = int(dt_str[pos:pos + 2]) - pos += 2 - - dayno = 1 - if len(dt_str) > pos: - if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: - raise ValueError('Inconsistent use of dash separator') - - pos += has_sep - - dayno = int(dt_str[pos:pos + 1]) - pos += 1 - - base_date = self._calculate_weekdate(year, weekno, dayno) - else: - # YYYYDDD or YYYY-DDD - if len(dt_str) - pos < 3: - raise ValueError('Invalid ordinal day') - - ordinal_day = int(dt_str[pos:pos + 3]) - pos += 3 - - if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): - raise ValueError('Invalid ordinal day' + - ' {} for year {}'.format(ordinal_day, year)) - - base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) - - components = [base_date.year, base_date.month, base_date.day] - return components, pos - - def _calculate_weekdate(self, year, week, day): - """ - Calculate the day of corresponding to the ISO year-week-day calendar. - - This function is effectively the inverse of - :func:`datetime.date.isocalendar`. 
- - :param year: - The year in the ISO calendar - - :param week: - The week in the ISO calendar - range is [1, 53] - - :param day: - The day in the ISO calendar - range is [1 (MON), 7 (SUN)] - - :return: - Returns a :class:`datetime.date` - """ - if not 0 < week < 54: - raise ValueError('Invalid week: {}'.format(week)) - - if not 0 < day < 8: # Range is 1-7 - raise ValueError('Invalid weekday: {}'.format(day)) - - # Get week 1 for the specific year: - jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it - week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) - - # Now add the specific number of weeks and days to get what we want - week_offset = (week - 1) * 7 + (day - 1) - return week_1 + timedelta(days=week_offset) - - def _parse_isotime(self, timestr): - len_str = len(timestr) - components = [0, 0, 0, 0, None] - pos = 0 - comp = -1 - - if len_str < 2: - raise ValueError('ISO time too short') - - has_sep = False - - while pos < len_str and comp < 5: - comp += 1 - - if timestr[pos:pos + 1] in b'-+Zz': - # Detect time zone boundary - components[-1] = self._parse_tzstr(timestr[pos:]) - pos = len_str - break - - if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP: - has_sep = True - pos += 1 - elif comp == 2 and has_sep: - if timestr[pos:pos+1] != self._TIME_SEP: - raise ValueError('Inconsistent use of colon separator') - pos += 1 - - if comp < 3: - # Hour, minute, second - components[comp] = int(timestr[pos:pos + 2]) - pos += 2 - - if comp == 3: - # Fraction of a second - frac = self._FRACTION_REGEX.match(timestr[pos:]) - if not frac: - continue - - us_str = frac.group(1)[:6] # Truncate to microseconds - components[comp] = int(us_str) * 10**(6 - len(us_str)) - pos += len(frac.group()) - - if pos < len_str: - raise ValueError('Unused components in ISO string') - - if components[0] == 24: - # Standard supports 00:00 and 24:00 as representations of midnight - if any(component != 0 for component in components[1:4]): - raise ValueError('Hour may only be 24 at 24:00:00.000') - - return components - - def _parse_tzstr(self, tzstr, zero_as_utc=True): - if tzstr == b'Z' or tzstr == b'z': - return tz.UTC - - if len(tzstr) not in {3, 5, 6}: - raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') - - if tzstr[0:1] == b'-': - mult = -1 - elif tzstr[0:1] == b'+': - mult = 1 - else: - raise ValueError('Time zone offset requires sign') - - hours = int(tzstr[1:3]) - if len(tzstr) == 3: - minutes = 0 - else: - minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) - - if zero_as_utc and hours == 0 and minutes == 0: - return tz.UTC - else: - if minutes > 59: - raise ValueError('Invalid minutes in time zone offset') - - if hours > 23: - raise ValueError('Invalid hours in time zone offset') - - return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) - - -DEFAULT_ISOPARSER = isoparser() -isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/spaces/Bokanovskii/Image-to-music/style.css b/spaces/Bokanovskii/Image-to-music/style.css deleted file mode 100644 index 9d8a0dde75eb91dcc7ebbc20280941d29bda0764..0000000000000000000000000000000000000000 --- a/spaces/Bokanovskii/Image-to-music/style.css +++ /dev/null @@ -1,42 +0,0 @@ -#col-container { - max-width: 510px; - margin-left: auto; - margin-right: auto; -} -a { - text-decoration-line: underline; - font-weight: 600; -} -div#app-output .h-full { - min-height: 5rem; -} -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} -.footer > p { - font-size: 0.8rem; - 
display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; -} -.dark .footer { - border-color: #303030; -} -.dark .footer > p { - background: #0b0f19; -} -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis.py deleted file mode 100644 index 765ec7494a1b16526f588ee9f71658779ce936eb..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -import os -from fvcore.common.file_io import PathManager -from fvcore.common.timer import Timer - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.structures import BoxMode - -from .builtin_meta import _get_coco_instances_meta -from .lvis_v0_5_categories import LVIS_CATEGORIES - -""" -This file contains functions to parse LVIS-format annotations into dicts in the -"Detectron2 format". -""" - -logger = logging.getLogger(__name__) - -__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"] - - -def register_lvis_instances(name, metadata, json_file, image_root): - """ - Register a dataset in LVIS's json annotation format for instance detection and segmentation. - - Args: - name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train". - metadata (dict): extra metadata associated with this dataset. It can be an empty dict. - json_file (str): path to the json instance annotation file. - image_root (str or path-like): directory which contains all the images. - """ - DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata - ) - - -def load_lvis_json(json_file, image_root, dataset_name=None): - """ - Load a json file in LVIS's annotation format. - - Args: - json_file (str): full path to the LVIS json annotation file. - image_root (str): the directory where the images in this json file exists. - dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train"). - If provided, this function will put "thing_classes" into the metadata - associated with this dataset. - - Returns: - list[dict]: a list of dicts in Detectron2 standard format. (See - `Using Custom Datasets `_ ) - - Notes: - 1. This function does not read the image files. - The results do not have the "image" field. 
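    Example (a minimal usage sketch, not part of the original docstring; the
    paths and dataset name below are illustrative and assume the ``lvis``
    package is installed)::

        dicts = load_lvis_json(
            "datasets/lvis/lvis_v0.5_val.json",  # hypothetical annotation file
            "datasets/coco/",                    # hypothetical image root
            dataset_name="lvis_v0.5_val",
        )
        # each record carries the image path plus its per-object annotations
        print(len(dicts), dicts[0]["file_name"], len(dicts[0]["annotations"]))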
- """ - from lvis import LVIS - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) - - if dataset_name is not None: - meta = get_lvis_instances_meta(dataset_name) - MetadataCatalog.get(dataset_name).set(**meta) - - # sort indices for reproducible results - img_ids = sorted(lvis_api.imgs.keys()) - # imgs is a list of dicts, each looks something like: - # {'license': 4, - # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', - # 'file_name': 'COCO_val2014_000000001268.jpg', - # 'height': 427, - # 'width': 640, - # 'date_captured': '2013-11-17 05:57:24', - # 'id': 1268} - imgs = lvis_api.load_imgs(img_ids) - # anns is a list[list[dict]], where each dict is an annotation - # record for an object. The inner list enumerates the objects in an image - # and the outer list enumerates over images. Example of anns[0]: - # [{'segmentation': [[192.81, - # 247.09, - # ... - # 219.03, - # 249.06]], - # 'area': 1035.749, - # 'image_id': 1268, - # 'bbox': [192.81, 224.8, 74.73, 33.43], - # 'category_id': 16, - # 'id': 42986}, - # ...] - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - # Sanity check that each annotation has a unique id - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format( - json_file - ) - - imgs_anns = list(zip(imgs, anns)) - - logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - file_name = img_dict["file_name"] - if img_dict["file_name"].startswith("COCO"): - # Convert form the COCO 2014 file naming convention of - # COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming convention of - # 000000000000.jpg (LVIS v1 will fix this naming issue) - file_name = file_name[-16:] - record["file_name"] = os.path.join(image_root, file_name) - record["height"] = img_dict["height"] - record["width"] = img_dict["width"] - record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", []) - record["neg_category_ids"] = img_dict.get("neg_category_ids", []) - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - # Check that the image_id in this annotation is the same as - # the image_id we're looking at. - # This fails only when the data parsing logic or the annotation file is buggy. - assert anno["image_id"] == image_id - obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed - segm = anno["segmentation"] # list[list[float]] - # filter out invalid polygons (< 3 points) - valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] - assert len(segm) == len( - valid_segm - ), "Annotation contains an invalid polygon with < 3 points" - assert len(segm) > 0 - obj["segmentation"] = segm - objs.append(obj) - record["annotations"] = objs - dataset_dicts.append(record) - - return dataset_dicts - - -def get_lvis_instances_meta(dataset_name): - """ - Load LVIS metadata. - - Args: - dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5"). 
- - Returns: - dict: LVIS metadata with keys: thing_classes - """ - if "cocofied" in dataset_name: - return _get_coco_instances_meta() - if "v0.5" in dataset_name: - return _get_lvis_instances_meta_v0_5() - # There will be a v1 in the future - # elif dataset_name == "lvis_v1": - # return get_lvis_instances_meta_v1() - raise ValueError("No built-in metadata for dataset {}".format(dataset_name)) - - -def _get_lvis_instances_meta_v0_5(): - assert len(LVIS_CATEGORIES) == 1230 - cat_ids = [k["id"] for k in LVIS_CATEGORIES] - assert min(cat_ids) == 1 and max(cat_ids) == len( - cat_ids - ), "Category ids are not in [1, #categories], as expected" - # Ensure that the category list is sorted by id - lvis_categories = sorted(LVIS_CATEGORIES, key=lambda x: x["id"]) - thing_classes = [k["synonyms"][0] for k in lvis_categories] - meta = {"thing_classes": thing_classes} - return meta - - -if __name__ == "__main__": - """ - Test the LVIS json dataset loader. - - Usage: - python -m detectron2.data.datasets.lvis \ - path/to/json path/to/image_root dataset_name vis_limit - """ - import sys - import numpy as np - from detectron2.utils.logger import setup_logger - from PIL import Image - import detectron2.data.datasets # noqa # add pre-defined metadata - from detectron2.utils.visualizer import Visualizer - - logger = setup_logger(name=__name__) - meta = MetadataCatalog.get(sys.argv[3]) - - dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3]) - logger.info("Done loading {} samples.".format(len(dicts))) - - dirname = "lvis-data-vis" - os.makedirs(dirname, exist_ok=True) - for d in dicts[: int(sys.argv[4])]: - img = np.array(Image.open(d["file_name"])) - visualizer = Visualizer(img, metadata=meta) - vis = visualizer.draw_dataset_dict(d) - fpath = os.path.join(dirname, os.path.basename(d["file_name"])) - vis.save(fpath) diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_sequences_and_iterators.py b/spaces/CVPR/LIVE/pybind11/tests/test_sequences_and_iterators.py deleted file mode 100644 index 8f6c0c4bbdf71bb45759d83a630f910a4f117ecd..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/tests/test_sequences_and_iterators.py +++ /dev/null @@ -1,191 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest -from pybind11_tests import sequences_and_iterators as m -from pybind11_tests import ConstructorStats - - -def isclose(a, b, rel_tol=1e-05, abs_tol=0.0): - """Like math.isclose() from Python 3.5""" - return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) - - -def allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0): - return all(isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) for a, b in zip(a_list, b_list)) - - -def test_generalized_iterators(): - assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)] - assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)] - assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == [] - - assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3] - assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1] - assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == [] - - # __next__ must continue to raise StopIteration - it = m.IntPairs([(0, 0)]).nonzero() - for _ in range(3): - with pytest.raises(StopIteration): - next(it) - - it = m.IntPairs([(0, 0)]).nonzero_keys() - for _ in range(3): - with pytest.raises(StopIteration): - next(it) - - -def test_sliceable(): - sliceable = m.Sliceable(100) - assert sliceable[::] == (0, 100, 1) - assert 
sliceable[10::] == (10, 100, 1) - assert sliceable[:10:] == (0, 10, 1) - assert sliceable[::10] == (0, 100, 10) - assert sliceable[-10::] == (90, 100, 1) - assert sliceable[:-10:] == (0, 90, 1) - assert sliceable[::-10] == (99, -1, -10) - assert sliceable[50:60:1] == (50, 60, 1) - assert sliceable[50:60:-1] == (50, 60, -1) - - -def test_sequence(): - cstats = ConstructorStats.get(m.Sequence) - - s = m.Sequence(5) - assert cstats.values() == ['of size', '5'] - - assert "Sequence" in repr(s) - assert len(s) == 5 - assert s[0] == 0 and s[3] == 0 - assert 12.34 not in s - s[0], s[3] = 12.34, 56.78 - assert 12.34 in s - assert isclose(s[0], 12.34) and isclose(s[3], 56.78) - - rev = reversed(s) - assert cstats.values() == ['of size', '5'] - - rev2 = s[::-1] - assert cstats.values() == ['of size', '5'] - - it = iter(m.Sequence(0)) - for _ in range(3): # __next__ must continue to raise StopIteration - with pytest.raises(StopIteration): - next(it) - assert cstats.values() == ['of size', '0'] - - expected = [0, 56.78, 0, 0, 12.34] - assert allclose(rev, expected) - assert allclose(rev2, expected) - assert rev == rev2 - - rev[0::2] = m.Sequence([2.0, 2.0, 2.0]) - assert cstats.values() == ['of size', '3', 'from std::vector'] - - assert allclose(rev, [2, 56.78, 2, 0, 2]) - - assert cstats.alive() == 4 - del it - assert cstats.alive() == 3 - del s - assert cstats.alive() == 2 - del rev - assert cstats.alive() == 1 - del rev2 - assert cstats.alive() == 0 - - assert cstats.values() == [] - assert cstats.default_constructions == 0 - assert cstats.copy_constructions == 0 - assert cstats.move_constructions >= 1 - assert cstats.copy_assignments == 0 - assert cstats.move_assignments == 0 - - -def test_sequence_length(): - """#2076: Exception raised by len(arg) should be propagated """ - class BadLen(RuntimeError): - pass - - class SequenceLike(): - def __getitem__(self, i): - return None - - def __len__(self): - raise BadLen() - - with pytest.raises(BadLen): - m.sequence_length(SequenceLike()) - - assert m.sequence_length([1, 2, 3]) == 3 - assert m.sequence_length("hello") == 5 - - -def test_map_iterator(): - sm = m.StringMap({'hi': 'bye', 'black': 'white'}) - assert sm['hi'] == 'bye' - assert len(sm) == 2 - assert sm['black'] == 'white' - - with pytest.raises(KeyError): - assert sm['orange'] - sm['orange'] = 'banana' - assert sm['orange'] == 'banana' - - expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'} - for k in sm: - assert sm[k] == expected[k] - for k, v in sm.items(): - assert v == expected[k] - - it = iter(m.StringMap({})) - for _ in range(3): # __next__ must continue to raise StopIteration - with pytest.raises(StopIteration): - next(it) - - -def test_python_iterator_in_cpp(): - t = (1, 2, 3) - assert m.object_to_list(t) == [1, 2, 3] - assert m.object_to_list(iter(t)) == [1, 2, 3] - assert m.iterator_to_list(iter(t)) == [1, 2, 3] - - with pytest.raises(TypeError) as excinfo: - m.object_to_list(1) - assert "object is not iterable" in str(excinfo.value) - - with pytest.raises(TypeError) as excinfo: - m.iterator_to_list(1) - assert "incompatible function arguments" in str(excinfo.value) - - def bad_next_call(): - raise RuntimeError("py::iterator::advance() should propagate errors") - - with pytest.raises(RuntimeError) as excinfo: - m.iterator_to_list(iter(bad_next_call, None)) - assert str(excinfo.value) == "py::iterator::advance() should propagate errors" - - lst = [1, None, 0, None] - assert m.count_none(lst) == 2 - assert m.find_none(lst) is True - assert m.count_nonzeros({"a": 0, "b": 1, 
"c": 2}) == 2 - - r = range(5) - assert all(m.tuple_iterator(tuple(r))) - assert all(m.list_iterator(list(r))) - assert all(m.sequence_iterator(r)) - - -def test_iterator_passthrough(): - """#181: iterator passthrough did not compile""" - from pybind11_tests.sequences_and_iterators import iterator_passthrough - - assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15] - - -def test_iterator_rvp(): - """#388: Can't make iterators via make_iterator() with different r/v policies """ - import pybind11_tests.sequences_and_iterators as m - - assert list(m.make_iterator_1()) == [1, 2, 3] - assert list(m.make_iterator_2()) == [1, 2, 3] - assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2())) diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/__init__.py b/spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/cocoeval/cocoeval.cpp b/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/cocoeval/cocoeval.cpp deleted file mode 100644 index 0a5b7b907c06720fefc77b0dfd921b8ec3ecf2be..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/cocoeval/cocoeval.cpp +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#include "cocoeval.h" -#include -#include -#include -#include - -using namespace pybind11::literals; - -namespace detectron2 { - -namespace COCOeval { - -// Sort detections from highest score to lowest, such that -// detection_instances[detection_sorted_indices[t]] >= -// detection_instances[detection_sorted_indices[t+1]]. 
Use stable_sort to match -// original COCO API -void SortInstancesByDetectionScore( - const std::vector& detection_instances, - std::vector* detection_sorted_indices) { - detection_sorted_indices->resize(detection_instances.size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_instances](size_t j1, size_t j2) { - return detection_instances[j1].score > detection_instances[j2].score; - }); -} - -// Partition the ground truth objects based on whether or not to ignore them -// based on area -void SortInstancesByIgnore( - const std::array& area_range, - const std::vector& ground_truth_instances, - std::vector* ground_truth_sorted_indices, - std::vector* ignores) { - ignores->clear(); - ignores->reserve(ground_truth_instances.size()); - for (auto o : ground_truth_instances) { - ignores->push_back( - o.ignore || o.area < area_range[0] || o.area > area_range[1]); - } - - ground_truth_sorted_indices->resize(ground_truth_instances.size()); - std::iota( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - 0); - std::stable_sort( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - [&ignores](size_t j1, size_t j2) { - return (int)(*ignores)[j1] < (int)(*ignores)[j2]; - }); -} - -// For each IOU threshold, greedily match each detected instance to a ground -// truth instance (if possible) and store the results -void MatchDetectionsToGroundTruth( - const std::vector& detection_instances, - const std::vector& detection_sorted_indices, - const std::vector& ground_truth_instances, - const std::vector& ground_truth_sorted_indices, - const std::vector& ignores, - const std::vector>& ious, - const std::vector& iou_thresholds, - const std::array& area_range, - ImageEvaluation* results) { - // Initialize memory to store return data matches and ignore - const int num_iou_thresholds = iou_thresholds.size(); - const int num_ground_truth = ground_truth_sorted_indices.size(); - const int num_detections = detection_sorted_indices.size(); - std::vector ground_truth_matches( - num_iou_thresholds * num_ground_truth, 0); - std::vector& detection_matches = results->detection_matches; - std::vector& detection_ignores = results->detection_ignores; - std::vector& ground_truth_ignores = results->ground_truth_ignores; - detection_matches.resize(num_iou_thresholds * num_detections, 0); - detection_ignores.resize(num_iou_thresholds * num_detections, false); - ground_truth_ignores.resize(num_ground_truth); - for (auto g = 0; g < num_ground_truth; ++g) { - ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - for (auto d = 0; d < num_detections; ++d) { - // information about best match so far (match=-1 -> unmatched) - double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); - int match = -1; - for (auto g = 0; g < num_ground_truth; ++g) { - // if this ground truth instance is already matched and not a - // crowd, it cannot be matched to another detection - if (ground_truth_matches[t * num_ground_truth + g] > 0 && - !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { - continue; - } - - // if detected instance matched to a regular ground truth - // instance, we can break on the first ground truth instance - // tagged as ignore (because they are sorted by the ignore tag) - if (match >= 0 && !ground_truth_ignores[match] && - ground_truth_ignores[g]) 
{ - break; - } - - // if IOU overlap is the best so far, store the match appropriately - if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { - best_iou = ious[d][ground_truth_sorted_indices[g]]; - match = g; - } - } - // if match was made, store id of match for both detection and - // ground truth - if (match >= 0) { - detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; - detection_matches[t * num_detections + d] = - ground_truth_instances[ground_truth_sorted_indices[match]].id; - ground_truth_matches[t * num_ground_truth + match] = - detection_instances[detection_sorted_indices[d]].id; - } - - // set unmatched detections outside of area range to ignore - const InstanceAnnotation& detection = - detection_instances[detection_sorted_indices[d]]; - detection_ignores[t * num_detections + d] = - detection_ignores[t * num_detections + d] || - (detection_matches[t * num_detections + d] == 0 && - (detection.area < area_range[0] || detection.area > area_range[1])); - } - } - - // store detection score results - results->detection_scores.resize(detection_sorted_indices.size()); - for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { - results->detection_scores[d] = - detection_instances[detection_sorted_indices[d]].score; - } -} - -std::vector EvaluateImages( - const std::vector>& area_ranges, - int max_detections, - const std::vector& iou_thresholds, - const ImageCategoryInstances>& image_category_ious, - const ImageCategoryInstances& - image_category_ground_truth_instances, - const ImageCategoryInstances& - image_category_detection_instances) { - const int num_area_ranges = area_ranges.size(); - const int num_images = image_category_ground_truth_instances.size(); - const int num_categories = - image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; - std::vector detection_sorted_indices; - std::vector ground_truth_sorted_indices; - std::vector ignores; - std::vector results_all( - num_images * num_area_ranges * num_categories); - - // Store results for each image, category, and area range combination. 
Results - // for each IOU threshold are packed into the same ImageEvaluation object - for (auto i = 0; i < num_images; ++i) { - for (auto c = 0; c < num_categories; ++c) { - const std::vector& ground_truth_instances = - image_category_ground_truth_instances[i][c]; - const std::vector& detection_instances = - image_category_detection_instances[i][c]; - - SortInstancesByDetectionScore( - detection_instances, &detection_sorted_indices); - if ((int)detection_sorted_indices.size() > max_detections) { - detection_sorted_indices.resize(max_detections); - } - - for (size_t a = 0; a < area_ranges.size(); ++a) { - SortInstancesByIgnore( - area_ranges[a], - ground_truth_instances, - &ground_truth_sorted_indices, - &ignores); - - MatchDetectionsToGroundTruth( - detection_instances, - detection_sorted_indices, - ground_truth_instances, - ground_truth_sorted_indices, - ignores, - image_category_ious[i][c], - iou_thresholds, - area_ranges[a], - &results_all - [c * num_area_ranges * num_images + a * num_images + i]); - } - } - } - - return results_all; -} - -// Convert a python list to a vector -template -std::vector list_to_vec(const py::list& l) { - std::vector v(py::len(l)); - for (int i = 0; i < (int)py::len(l); ++i) { - v[i] = l[i].cast(); - } - return v; -} - -// Helper function to Accumulate() -// Considers the evaluation results applicable to a particular category, area -// range, and max_detections parameter setting, which begin at -// evaluations[evaluation_index]. Extracts a sorted list of length n of all -// applicable detection instances concatenated across all images in the dataset, -// which are represented by the outputs evaluation_indices, detection_scores, -// image_detection_indices, and detection_sorted_indices--all of which are -// length n. evaluation_indices[i] stores the applicable index into -// evaluations[] for instance i, which has detection score detection_score[i], -// and is the image_detection_indices[i]'th of the list of detections -// for the image containing i. 
detection_sorted_indices[] defines a sorted -// permutation of the 3 other outputs -int BuildSortedDetectionList( - const std::vector& evaluations, - const int64_t evaluation_index, - const int64_t num_images, - const int max_detections, - std::vector* evaluation_indices, - std::vector* detection_scores, - std::vector* detection_sorted_indices, - std::vector* image_detection_indices) { - assert(evaluations.size() >= evaluation_index + num_images); - - // Extract a list of object instances of the applicable category, area - // range, and max detections requirements such that they can be sorted - image_detection_indices->clear(); - evaluation_indices->clear(); - detection_scores->clear(); - image_detection_indices->reserve(num_images * max_detections); - evaluation_indices->reserve(num_images * max_detections); - detection_scores->reserve(num_images * max_detections); - int num_valid_ground_truth = 0; - for (auto i = 0; i < num_images; ++i) { - const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; - - for (int d = 0; - d < (int)evaluation.detection_scores.size() && d < max_detections; - ++d) { // detected instances - evaluation_indices->push_back(evaluation_index + i); - image_detection_indices->push_back(d); - detection_scores->push_back(evaluation.detection_scores[d]); - } - for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { - if (!ground_truth_ignore) { - ++num_valid_ground_truth; - } - } - } - - // Sort detections by decreasing score, using stable sort to match - // python implementation - detection_sorted_indices->resize(detection_scores->size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_scores](size_t j1, size_t j2) { - return (*detection_scores)[j1] > (*detection_scores)[j2]; - }); - - return num_valid_ground_truth; -} - -// Helper function to Accumulate() -// Compute a precision recall curve given a sorted list of detected instances -// encoded in evaluations, evaluation_indices, detection_scores, -// detection_sorted_indices, image_detection_indices (see -// BuildSortedDetectionList()). Using vectors precisions and recalls -// and temporary storage, output the results into precisions_out, recalls_out, -// and scores_out, which are large buffers containing many precion/recall curves -// for all possible parameter settings, with precisions_out_index and -// recalls_out_index defining the applicable indices to store results. 
-void ComputePrecisionRecallCurve( - const int64_t precisions_out_index, - const int64_t precisions_out_stride, - const int64_t recalls_out_index, - const std::vector& recall_thresholds, - const int iou_threshold_index, - const int num_iou_thresholds, - const int num_valid_ground_truth, - const std::vector& evaluations, - const std::vector& evaluation_indices, - const std::vector& detection_scores, - const std::vector& detection_sorted_indices, - const std::vector& image_detection_indices, - std::vector* precisions, - std::vector* recalls, - std::vector* precisions_out, - std::vector* scores_out, - std::vector* recalls_out) { - assert(recalls_out->size() > recalls_out_index); - - // Compute precision/recall for each instance in the sorted list of detections - int64_t true_positives_sum = 0, false_positives_sum = 0; - precisions->clear(); - recalls->clear(); - precisions->reserve(detection_sorted_indices.size()); - recalls->reserve(detection_sorted_indices.size()); - assert(!evaluations.empty() || detection_sorted_indices.empty()); - for (auto detection_sorted_index : detection_sorted_indices) { - const ImageEvaluation& evaluation = - evaluations[evaluation_indices[detection_sorted_index]]; - const auto num_detections = - evaluation.detection_matches.size() / num_iou_thresholds; - const auto detection_index = iou_threshold_index * num_detections + - image_detection_indices[detection_sorted_index]; - assert(evaluation.detection_matches.size() > detection_index); - assert(evaluation.detection_ignores.size() > detection_index); - const int64_t detection_match = - evaluation.detection_matches[detection_index]; - const bool detection_ignores = - evaluation.detection_ignores[detection_index]; - const auto true_positive = detection_match > 0 && !detection_ignores; - const auto false_positive = detection_match == 0 && !detection_ignores; - if (true_positive) { - ++true_positives_sum; - } - if (false_positive) { - ++false_positives_sum; - } - - const double recall = - static_cast(true_positives_sum) / num_valid_ground_truth; - recalls->push_back(recall); - const int64_t num_valid_detections = - true_positives_sum + false_positives_sum; - const double precision = num_valid_detections > 0 - ? static_cast(true_positives_sum) / num_valid_detections - : 0.0; - precisions->push_back(precision); - } - - (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; - - for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { - if ((*precisions)[i] > (*precisions)[i - 1]) { - (*precisions)[i - 1] = (*precisions)[i]; - } - } - - // Sample the per instance precision/recall list at each recall threshold - for (size_t r = 0; r < recall_thresholds.size(); ++r) { - // first index in recalls >= recall_thresholds[r] - std::vector::iterator low = std::lower_bound( - recalls->begin(), recalls->end(), recall_thresholds[r]); - size_t precisions_index = low - recalls->begin(); - - const auto results_ind = precisions_out_index + r * precisions_out_stride; - assert(results_ind < precisions_out->size()); - assert(results_ind < scores_out->size()); - if (precisions_index < precisions->size()) { - (*precisions_out)[results_ind] = (*precisions)[precisions_index]; - (*scores_out)[results_ind] = - detection_scores[detection_sorted_indices[precisions_index]]; - } else { - (*precisions_out)[results_ind] = 0; - (*scores_out)[results_ind] = 0; - } - } -} -py::dict Accumulate( - const py::object& params, - const std::vector& evaluations) { - const std::vector recall_thresholds = - list_to_vec(params.attr("recThrs")); - const std::vector max_detections = - list_to_vec(params.attr("maxDets")); - const int num_iou_thresholds = py::len(params.attr("iouThrs")); - const int num_recall_thresholds = py::len(params.attr("recThrs")); - const int num_categories = params.attr("useCats").cast() == 1 - ? py::len(params.attr("catIds")) - : 1; - const int num_area_ranges = py::len(params.attr("areaRng")); - const int num_max_detections = py::len(params.attr("maxDets")); - const int num_images = py::len(params.attr("imgIds")); - - std::vector precisions_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - std::vector recalls_out( - num_iou_thresholds * num_categories * num_area_ranges * - num_max_detections, - -1); - std::vector scores_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - - // Consider the list of all detected instances in the entire dataset in one - // large list. evaluation_indices, detection_scores, - // image_detection_indices, and detection_sorted_indices all have the same - // length as this list, such that each entry corresponds to one detected - // instance - std::vector evaluation_indices; // indices into evaluations[] - std::vector detection_scores; // detection scores of each instance - std::vector detection_sorted_indices; // sorted indices of all - // instances in the dataset - std::vector - image_detection_indices; // indices into the list of detected instances in - // the same image as each instance - std::vector precisions, recalls; - - for (auto c = 0; c < num_categories; ++c) { - for (auto a = 0; a < num_area_ranges; ++a) { - for (auto m = 0; m < num_max_detections; ++m) { - // The COCO PythonAPI assumes evaluations[] (the return value of - // COCOeval::EvaluateImages() is one long list storing results for each - // combination of category, area range, and image id, with categories in - // the outermost loop and images in the innermost loop. 
- const int64_t evaluations_index = - c * num_area_ranges * num_images + a * num_images; - int num_valid_ground_truth = BuildSortedDetectionList( - evaluations, - evaluations_index, - num_images, - max_detections[m], - &evaluation_indices, - &detection_scores, - &detection_sorted_indices, - &image_detection_indices); - - if (num_valid_ground_truth == 0) { - continue; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - // recalls_out is a flattened vectors representing a - // num_iou_thresholds X num_categories X num_area_ranges X - // num_max_detections matrix - const int64_t recalls_out_index = - t * num_categories * num_area_ranges * num_max_detections + - c * num_area_ranges * num_max_detections + - a * num_max_detections + m; - - // precisions_out and scores_out are flattened vectors - // representing a num_iou_thresholds X num_recall_thresholds X - // num_categories X num_area_ranges X num_max_detections matrix - const int64_t precisions_out_stride = - num_categories * num_area_ranges * num_max_detections; - const int64_t precisions_out_index = t * num_recall_thresholds * - num_categories * num_area_ranges * num_max_detections + - c * num_area_ranges * num_max_detections + - a * num_max_detections + m; - - ComputePrecisionRecallCurve( - precisions_out_index, - precisions_out_stride, - recalls_out_index, - recall_thresholds, - t, - num_iou_thresholds, - num_valid_ground_truth, - evaluations, - evaluation_indices, - detection_scores, - detection_sorted_indices, - image_detection_indices, - &precisions, - &recalls, - &precisions_out, - &scores_out, - &recalls_out); - } - } - } - } - - time_t rawtime; - struct tm local_time; - std::array buffer; - time(&rawtime); -#ifdef _WIN32 - localtime_s(&local_time, &rawtime); -#else - localtime_r(&rawtime, &local_time); -#endif - strftime( - buffer.data(), 200, "%Y-%m-%d %H:%num_max_detections:%S", &local_time); - return py::dict( - "params"_a = params, - "counts"_a = std::vector( - {num_iou_thresholds, - num_recall_thresholds, - num_categories, - num_area_ranges, - num_max_detections}), - "date"_a = buffer, - "precision"_a = precisions_out, - "recall"_a = recalls_out, - "scores"_a = scores_out); -} - -} // namespace COCOeval - -} // namespace detectron2 diff --git a/spaces/Cat125/text-generator-v3/main.py b/spaces/Cat125/text-generator-v3/main.py deleted file mode 100644 index ff3ff4b5164db5cd5e8c2acee7d81f9a293a5eea..0000000000000000000000000000000000000000 --- a/spaces/Cat125/text-generator-v3/main.py +++ /dev/null @@ -1,133 +0,0 @@ -from random import choice, choices - -import gradio as gr -from tokenizers import Tokenizer - -from datamanager import get_data_v3, models - -tokenizer = Tokenizer.from_pretrained("bert-base-uncased") - - -def get_next_token_results(db:dict, message_tokens:list, prev_token:str, repeat:int = 0): - results = [] - if prev_token not in db: - return [] - for token in db[prev_token]: - token.score = 0 - for context in token.contexts: - if (context in message_tokens) and (repeat <= 1 or token.prev_token == prev_token): - token.score += 1 - if token.score > 0: - results.append(token) - return results - -def get_next_token(db, message_ids, prevtoken, repeat = 0): - results = get_next_token_results(db, message_ids, prevtoken, repeat) - if len(results) == 0: - if repeat < 2: - return choice(list(db.keys())) - else: - return get_next_token(db, message_ids, prevtoken, repeat + 1) - results = list(filter(lambda x: x.score, results)) - total_results = [] - weights = [] - for result in results: - 
total_results.append(result.token) - weights.append(result.score) - if len(total_results) == 0: - return get_next_token(db, message_ids, prevtoken, repeat + 1) - if len(total_results) > 5: - total_results = total_results[:5] - weights = weights[:5] - return (choices(total_results, weights=weights, k=1) or '.')[0] - - -def generator(user_message, token_count, mode, model_name): - db3 = None - for key, model in models.items(): - if model['name'] == model_name: - db3 = get_data_v3(key) - break - if not db3: - raise gr.Error('Could not find model ' + str(model_name)) - message_ids = tokenizer.encode(user_message).ids - if token_count < 0 or token_count > 1000: - raise gr.Error("Invalid token count. It must be between 0 and 1000.") - text_ids = [] - curtoken = 0 - prevtoken = 0 - if mode == "Continue": - text_ids = message_ids - curtoken = text_ids[-1] - i = 0 - while len(text_ids) < token_count: - prevtoken = curtoken - curtoken = get_next_token(db3, message_ids, prevtoken) - text_ids.append(curtoken) - if 1012 in text_ids: - yield tokenizer.decode(text_ids) - break - if i == 0 and 1012 in text_ids: - raise gr.Error("Error in generating. Try to use another prompt") - i += 1 - yield tokenizer.decode(text_ids) - -demo = gr.Blocks( - title="Text Generator v2" -) - -title_html = """ -
    -

    Text Generator v2

    -

    Generates text using per-word context system

    - -
    -""" -info_text = """ -# Information about the models -### English -`Language`: English -`Quality`: 6/10 -`Sources`: ChatGPT, https://pastebin.com/WYvij310 -### English-Long -`Language`: English -`Quality`: 5/10 -`Sources`: https://pastebin.com/WYvij310, https://whiletrue.neocities.org/lte, https://rainbowfluffysheep.tokenpress.com/the-longest-text-ever/ -### Russian-Lite -`Language`: Russian -`Quality`: 5/10 -`Sources`: https://goroda.murman.ru/index.php?topic=6508.20;wap2 -### Russian-Large -`Language`: Russian -`Quality`: 6/10 -`Sources`: http://staging.budsvetom.com/literature_items/ochen-dlinnyy-tekst - -# Training -```bash -python train.py -r [-t] [-l ...] -``` -`--rebuild` (`-r`) - Models that will be trained. -`--turbo` (`-t`) - Enables turbo training. Will skip morphological analysis and just add all tokens directly. -`--log` (`-l`) - Logs listed databases to the console after training. - -> **Note**: Use `--turbo` only when training with Russian texts. -""" -with demo: - gr.HTML(title_html) - with gr.Row(): - with gr.Column(): - inp = gr.Textbox(label="Context message") - token_count = gr.Number(30, precision=1, label="Max token count") - mode = gr.Radio(["Generate", "Continue"], value="Generate", label="Mode") - model = gr.Dropdown([model_info[1]['name'] for model_info in models.items()], label="Model", value="English-Long") - with gr.Row(): - stop_btn = gr.Button("Stop", variant="stop") - btn = gr.Button("Submit", variant="primary") - with gr.Column(): - out = gr.Textbox(label="Output") - with gr.Accordion(label="Information", open=False): - gr.Markdown(info_text) - submit_event = btn.click(fn=generator, inputs=[inp, token_count, mode, model], outputs=out) - stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event], queue=False) -demo.queue(concurrency_count=3) -demo.launch() diff --git a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/help/version-info.html b/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/help/version-info.html deleted file mode 100644 index b8a780e49df478ec08f35c5ae7a13eefafc8ef36..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/help/version-info.html +++ /dev/null @@ -1,37 +0,0 @@ -{{extend elemLayout}} - -{{block 'css'}} - -{{/block}} - -{{block 'main'}} -{{each changelogs ds idx}} -
    - {{set v = ds.version }} - {{set isDev = v[v.length-1] === 'v'}} -
    - {{if idx === 0 }} -
    当前版本 {{v}}
    - {{else}} -
    {{name || 'ws'}}版本 {{v}}
    - {{/if}} -
    -
      - {{each ds.logs log}} -
    • -

      {{@log.title}}

      - {{if log.logs.length > 0}} -
        - {{each log.logs ls}} -
      • {{@ls}}
      • - {{/each}} -
      - {{/if}} -
    • - {{/each}} -
    -
    -
    -
    -{{/each}} -{{/block}} \ No newline at end of file diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/look_this_icon/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/look_this_icon/__init__.py deleted file mode 100644 index fab54612a3c469185e43f49682b3d7d2f2fda73b..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/look_this_icon/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -from pathlib import Path -from typing import List - -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.exception import TextOverLength -from meme_generator.utils import make_jpg_or_gif - -img_dir = Path(__file__).parent / "images" - - -def look_this_icon(images: List[BuildImage], texts: List[str], args): - text = texts[0] if texts else "朋友\n先看看这个图标再说话" - frame = BuildImage.open(img_dir / "nmsl.png") - try: - frame.draw_text( - (0, 933, 1170, 1143), - text, - lines_align="center", - weight="bold", - max_fontsize=100, - min_fontsize=50, - ) - except ValueError: - raise TextOverLength(text) - - def make(img: BuildImage) -> BuildImage: - img = img.convert("RGBA").resize((515, 515), keep_ratio=True) - return frame.copy().paste(img, (599, 403), below=True) - - return make_jpg_or_gif(images[0], make) - - -add_meme( - "look_this_icon", - look_this_icon, - min_images=1, - max_images=1, - min_texts=0, - max_texts=1, - default_texts=["朋友\n先看看这个图标再说话"], - keywords=["看图标"], -) diff --git a/spaces/Codecooker/rvcapi/src/main.py b/spaces/Codecooker/rvcapi/src/main.py deleted file mode 100644 index 0d582eb60b42ea5db0762f383f968aa6c6cde85e..0000000000000000000000000000000000000000 --- a/spaces/Codecooker/rvcapi/src/main.py +++ /dev/null @@ -1,306 +0,0 @@ -import gc -import hashlib -import json -import os -import argparse -from contextlib import suppress -from urllib.parse import urlparse, parse_qs - -import gradio as gr -import yt_dlp -from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter -from pedalboard.io import AudioFile -from pydub import AudioSegment - -from mdx import run_mdx -from rvc import Config, load_hubert, get_vc, rvc_infer - -BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - -mdxnet_models_dir = os.path.join(BASE_DIR, 'mdxnet_models') -rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models') -output_dir = os.path.join(BASE_DIR, 'song_output') - - -def get_youtube_video_id(url, ignore_playlist=True): - """ - Examples: - http://youtu.be/SA2iWivDJiE - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu - http://www.youtube.com/embed/SA2iWivDJiE - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US - """ - query = urlparse(url) - if query.hostname == 'youtu.be': - if query.path[1:] == 'watch': - return query.query[2:] - return query.path[1:] - - if query.hostname in {'www.youtube.com', 'youtube.com', 'music.youtube.com'}: - if not ignore_playlist: - # use case: get playlist id not current video in playlist - with suppress(KeyError): - return parse_qs(query.query)['list'][0] - if query.path == '/watch': - return parse_qs(query.query)['v'][0] - if query.path[:7] == '/watch/': - return query.path.split('/')[1] - if query.path[:7] == '/embed/': - return query.path.split('/')[2] - if query.path[:3] == '/v/': - return query.path.split('/')[2] - - # returns None for invalid YouTube url - return None - - -def yt_download(link): - ydl_opts = { - 'format': 'bestaudio', - 'outtmpl': '%(title)s.%(ext)s', - 'nocheckcertificate': True, - 'ignoreerrors': True, - 'no_warnings': True, - 'quiet': 
True, - 'extractaudio': True, - } - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - result = ydl.extract_info(link, download=True) - download_path = ydl.prepare_filename(result) - - return download_path - - -def raise_exception(error_msg, is_webui): - if is_webui: - raise gr.Error(error_msg) - else: - raise Exception(error_msg) - - -def get_rvc_model(voice_model, is_webui): - rvc_model_filename, rvc_index_filename = None, None - model_dir = os.path.join(rvc_models_dir, voice_model) - for file in os.listdir(model_dir): - ext = os.path.splitext(file)[1] - if ext == '.pth': - rvc_model_filename = file - if ext == '.index': - rvc_index_filename = file - - if rvc_model_filename is None: - error_msg = f'No model file exists in {model_dir}.' - raise_exception(error_msg, is_webui) - - return os.path.join(model_dir, rvc_model_filename), os.path.join(model_dir, rvc_index_filename) if rvc_index_filename else '' - - -def get_audio_paths(song_dir): - orig_song_path = None - instrumentals_path = None - main_vocals_dereverb_path = None - backup_vocals_path = None - - for file in os.listdir(song_dir): - if file.endswith('_Instrumental.wav'): - instrumentals_path = os.path.join(song_dir, file) - orig_song_path = instrumentals_path.replace('_Instrumental', '') - - elif file.endswith('_Vocals_Main_DeReverb.wav'): - main_vocals_dereverb_path = os.path.join(song_dir, file) - - elif file.endswith('_Vocals_Backup.wav'): - backup_vocals_path = os.path.join(song_dir, file) - - return orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path - - -def get_hash(filepath): - with open(filepath, 'rb') as f: - file_hash = hashlib.blake2b() - while chunk := f.read(8192): - file_hash.update(chunk) - - return file_hash.hexdigest()[:11] - - -def display_progress(message, percent, is_webui, progress=None): - if is_webui: - progress(percent, desc=message) - else: - print(message) - - -def preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress=None): - keep_orig = False - if input_type == 'yt': - display_progress('[~] Downloading song...', 0, is_webui, progress) - song_link = song_input.split('&')[0] - orig_song_path = yt_download(song_link) - elif input_type == 'local': - orig_song_path = song_input - keep_orig = True - else: - orig_song_path = None - - song_output_dir = os.path.join(output_dir, song_id) - - display_progress('[~] Separating Vocals from Instrumental...', 0.1, is_webui, progress) - vocals_path, instrumentals_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'UVR-MDX-NET-Voc_FT.onnx'), orig_song_path, denoise=True, keep_orig=keep_orig) - - display_progress('[~] Separating Main Vocals from Backup Vocals...', 0.2, is_webui, progress) - backup_vocals_path, main_vocals_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'UVR_MDXNET_KARA_2.onnx'), vocals_path, suffix='Backup', invert_suffix='Main', denoise=True) - - display_progress('[~] Applying DeReverb to Vocals...', 0.3, is_webui, progress) - _, main_vocals_dereverb_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'Reverb_HQ_By_FoxJoy.onnx'), main_vocals_path, invert_suffix='DeReverb', exclude_main=True, denoise=True) - - return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path - - -def voice_change(voice_model, vocals_path, output_path, pitch_change, index_rate, filter_radius, rms_mix_rate, protect, is_webui): - rvc_model_path, rvc_index_path = 
get_rvc_model(voice_model, is_webui) - device = 'cuda:0' - config = Config(device, True) - hubert_model = load_hubert(device, config.is_half, os.path.join(rvc_models_dir, 'hubert_base.pt')) - cpt, version, net_g, tgt_sr, vc = get_vc(device, config.is_half, config, rvc_model_path) - - # convert main vocals - rvc_infer(rvc_index_path, index_rate, vocals_path, output_path, pitch_change, cpt, version, net_g, filter_radius, tgt_sr, rms_mix_rate, protect, vc, hubert_model) - del hubert_model, cpt - gc.collect() - - -def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping): - output_path = f'{os.path.splitext(audio_path)[0]}_mixed.wav' - - # Initialize audio effects plugins - board = Pedalboard( - [ - HighpassFilter(), - Compressor(ratio=4, threshold_db=-15), - Reverb(room_size=reverb_rm_size, dry_level=reverb_dry, wet_level=reverb_wet, damping=reverb_damping) - ] - ) - - with AudioFile(audio_path) as f: - with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o: - # Read one second of audio at a time, until the file is empty: - while f.tell() < f.frames: - chunk = f.read(int(f.samplerate)) - effected = board(chunk, f.samplerate, reset=False) - o.write(effected) - - return output_path - - -def combine_audio(audio_paths, output_path, main_gain, backup_gain, inst_gain): - main_vocal_audio = AudioSegment.from_wav(audio_paths[0]) - 4 + main_gain - backup_vocal_audio = AudioSegment.from_wav(audio_paths[1]) - 6 + backup_gain - instrumental_audio = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain - main_vocal_audio.overlay(backup_vocal_audio).overlay(instrumental_audio).export(output_path, format='mp3') - - -def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files, - is_webui=0, main_gain=0, backup_gain=0, inst_gain=0, index_rate=0.5, filter_radius=3, - rms_mix_rate=0.25, protect=0.33, reverb_rm_size=0.15, reverb_wet=0.2, reverb_dry=0.8, - reverb_damping=0.7, progress=gr.Progress()): - try: - if not song_input or not voice_model: - raise_exception('Ensure that the song input field and voice model field is filled.', is_webui) - - display_progress('[~] Starting AI Cover Generation Pipeline...', 0, is_webui, progress) - - with open(os.path.join(mdxnet_models_dir, 'model_data.json')) as infile: - mdx_model_params = json.load(infile) - - # if youtube url - if urlparse(song_input).scheme == 'https': - input_type = 'yt' - song_id = get_youtube_video_id(song_input) - if song_id is None: - error_msg = 'Invalid YouTube url.' - raise_exception(error_msg, is_webui) - - # local audio file - else: - input_type = 'local' - song_input = song_input.strip('\"') - if os.path.exists(song_input): - song_id = get_hash(song_input) - else: - error_msg = f'{song_input} does not exist.' 
- song_id = None - raise_exception(error_msg, is_webui) - - song_dir = os.path.join(output_dir, song_id) - - if not os.path.exists(song_dir): - os.makedirs(song_dir) - orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress) - - else: - vocals_path, main_vocals_path = None, None - paths = get_audio_paths(song_dir) - - # if any of the audio files aren't available or keep intermediate files, rerun preprocess - if any(path is None for path in paths) or keep_files: - orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress) - else: - orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path = paths - - ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}.wav') - ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver).mp3') - - if not os.path.exists(ai_vocals_path): - display_progress('[~] Converting voice using RVC...', 0.5, is_webui, progress) - voice_change(voice_model, main_vocals_dereverb_path, ai_vocals_path, pitch_change, index_rate, filter_radius, rms_mix_rate, protect, is_webui) - - display_progress('[~] Applying audio effects to vocals...', 0.8, is_webui, progress) - ai_vocals_mixed_path = add_audio_effects(ai_vocals_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping) - - display_progress('[~] Combining AI Vocals and Instrumentals...', 0.9, is_webui, progress) - combine_audio([ai_vocals_mixed_path, backup_vocals_path, instrumentals_path], ai_cover_path, main_gain, backup_gain, inst_gain) - - if not keep_files: - display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress) - intermediate_files = [vocals_path, main_vocals_path, ai_vocals_mixed_path] - for file in intermediate_files: - if file and os.path.exists(file): - os.remove(file) - - return ai_cover_path - - except Exception as e: - raise_exception(str(e), is_webui) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True) - parser.add_argument('-i', '--song-input', type=str, required=True, help='Link to a YouTube video or the filepath to a local mp3/wav file to create an AI cover of') - parser.add_argument('-dir', '--rvc-dirname', type=str, required=True, help='Name of the folder in the rvc_models directory containing the RVC model file and optional index file to use') - parser.add_argument('-p', '--pitch-change', type=int, required=True, help='Change the pitch of the AI voice. Generally use 12 for male to female conversions and -12 for vice-versa. Use 0 for no change') - parser.add_argument('-k', '--keep-files', action=argparse.BooleanOptionalAction, help='Whether to keep all intermediate audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals') - parser.add_argument('-ir', '--index-rate', type=float, default=0.5, help='A decimal number e.g. 0.5, used to reduce/resolve the timbre leakage problem. 
If set to 1, more biased towards the timbre quality of the training dataset') - parser.add_argument('-fr', '--filter-radius', type=int, default=3, help='A number between 0 and 7. If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.') - parser.add_argument('-rms', '--rms-mix-rate', type=float, default=0.25, help="A decimal number e.g. 0.25. Control how much to use the original vocal's loudness (0) or a fixed loudness (1).") - parser.add_argument('-pro', '--protect', type=float, default=0.33, help='A decimal number e.g. 0.33. Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy.') - parser.add_argument('-mv', '--main-vol', type=int, default=0, help='Volume change for AI main vocals in decibels. Use -3 to decrease by 3 decibels and 3 to increase by 3 decibels') - parser.add_argument('-bv', '--backup-vol', type=int, default=0, help='Volume change for backup vocals in decibels') - parser.add_argument('-iv', '--inst-vol', type=int, default=0, help='Volume change for instrumentals in decibels') - parser.add_argument('-rsize', '--reverb-size', type=float, default=0.15, help='Reverb room size between 0 and 1') - parser.add_argument('-rwet', '--reverb-wetness', type=float, default=0.2, help='Reverb wet level between 0 and 1') - parser.add_argument('-rdry', '--reverb-dryness', type=float, default=0.8, help='Reverb dry level between 0 and 1') - parser.add_argument('-rdamp', '--reverb-damping', type=float, default=0.7, help='Reverb damping between 0 and 1') - args = parser.parse_args() - - rvc_dirname = args.rvc_dirname - if not os.path.exists(os.path.join(rvc_models_dir, rvc_dirname)): - raise Exception(f'The folder {os.path.join(rvc_models_dir, rvc_dirname)} does not exist.') - - cover_path = song_cover_pipeline(args.song_input, rvc_dirname, args.pitch_change, args.keep_files, - main_gain=args.main_vol, backup_gain=args.backup_vol, inst_gain=args.inst_vol, - index_rate=args.index_rate, filter_radius=args.filter_radius, - rms_mix_rate=args.rms_mix_rate, protect=args.protect, - reverb_rm_size=args.reverb_size, reverb_wet=args.reverb_wetness, - reverb_dry=args.reverb_dryness, reverb_damping=args.reverb_damping) - print(f'[+] Cover generated at {cover_path}') diff --git a/spaces/Cpp4App/Cpp4App/CDM/README.md b/spaces/Cpp4App/Cpp4App/CDM/README.md deleted file mode 100644 index 55ccbc5a96d37a7c24f5d253e52c7c10fbc779cb..0000000000000000000000000000000000000000 --- a/spaces/Cpp4App/Cpp4App/CDM/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# UIED - UI element detection, detecting UI elements from UI screenshots or drawnings - -This project is still ongoing and this repo may be updated irregularly, I developed a web app for the UIED in http://uied.online - -## Related Publications: -[1. UIED: a hybrid tool for GUI element detection](https://dl.acm.org/doi/10.1145/3368089.3417940) - -[2. 
Object Detection for Graphical User Interface: Old Fashioned or Deep Learning or a Combination?](https://arxiv.org/abs/2008.05132) - ->The repo has been **upgraded with Google OCR** for GUI text detection, to use the original version in our paper (using [EAST](https://github.com/argman/EAST) as text detector), check the relase [v2.3](https://github.com/MulongXie/UIED/releases/tag/v2.3) and download the pre-trained model in [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing). - -## What is it? - -UI Element Detection (UIED) is an old-fashioned computer vision (CV) based element detection approach for graphic user interface. - -The input of UIED could be various UI image, such as mobile app or web page screenshot, UI design drawn by Photoshop or Sketch, and even some hand-drawn UI design. Then the approach detects and classifies text and graphic UI elements, and exports the detection result as JSON file for future application. - -UIED comprises two parts to detect UI text and graphic elements, such as button, image and input bar. -* For text, it leverages [Google OCR](https://cloud.google.com/vision/docs/ocr) to perfrom detection. - -* For graphical elements, it uses old-fashioned CV approaches to locate the elements and a CNN classifier to achieve classification. - -> UIED is highly customizable, you can replace both parts by your choice (e.g. other text detection approaches). Unlike black-box end-to-end deep learning approach, you can revise the algorithms in the non-text detection and merging (partially or entirely) easily to fit your task. - -![UIED Approach](https://github.com/MulongXie/UIED/blob/master/data/demo/approach.png) - -## How to use? - -### Dependency -* **Python 3.5** -* **Opencv 3.4.2** -* **Pandas** - - -### Installation - - - - -The new version of UIED equipped with Google OCR is easy to deploy and no pre-trained model is needed. Simply donwload the repo along with the dependencies. - -> Please replace the Google OCR key at `detect_text/ocr.py line 28` with your own (apply in [Google website](https://cloud.google.com/vision)). - -### Usage -To test your own image(s): -* To test single image, change *input_path_img* in ``run_single.py`` to your input image and the results will be output to *output_root*. -* To test mutiple images, change *input_img_root* in ``run_batch.py`` to your input directory and the results will be output to *output_root*. -* To adjust the parameters lively, using ``run_testing.py`` - -> Note: The best set of parameters vary for different types of GUI image (Mobile App, Web, PC). I highly recommend to first play with the ``run_testing.py`` to pick a good set of parameters for your data. 
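For a concrete picture of the text-detection half, the sketch below shows the kind of request the Google OCR step sends. This is only an illustration under stated assumptions — it is not the repo's actual `detect_text/ocr.py`, and `API_KEY` plus the sample image path are placeholders you would replace with your own.

```python
import base64

import requests

API_KEY = "YOUR_GOOGLE_VISION_KEY"  # placeholder: apply for a key on the Google Cloud Vision page
OCR_URL = f"https://vision.googleapis.com/v1/images:annotate?key={API_KEY}"

# Read and base64-encode the GUI screenshot to be analysed (placeholder path).
with open("data/input/example.png", "rb") as f:
    content = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "requests": [
        {
            "image": {"content": content},
            "features": [{"type": "TEXT_DETECTION"}],
        }
    ]
}

resp = requests.post(OCR_URL, json=payload).json()

# Each annotation carries the recognized string and its bounding polygon.
for ann in resp["responses"][0].get("textAnnotations", []):
    print(ann["description"], ann["boundingPoly"]["vertices"])
```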
- -## Folder structure -``cnn/`` -* Used to train classifier for graphic UI elements -* Set path of the CNN classification model - -``config/`` -* Set data paths -* Set parameters for graphic elements detection - -``data/`` -* Input UI images and output detection results - -``detect_compo/`` -* Non-text GUI component detection - -``detect_text/`` -* GUI text detection using Google OCR - -``detect_merge/`` -* Merge the detection results of non-text and text GUI elements - -The major detection algorithms are in ``detect_compo/``, ``detect_text/`` and ``detect_merge/`` - -## Demo -GUI element detection result for web screenshot - -![UI Components detection result](https://github.com/MulongXie/UIED/blob/master/data/demo/demo.png) diff --git a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/__init__.py b/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/__init__.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/__init__.py deleted file mode 100644 index 00c2d2b57abf56a8749329f6bf2092ffee021dca..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch - -from .batch_norm import FrozenBatchNorm2d -from .misc import Conv2d -from .misc import ConvTranspose2d -from .misc import BatchNorm2d -from .misc import interpolate -from .nms import nms -from .roi_align import ROIAlign -from .roi_align import roi_align -from .roi_pool import ROIPool -from .roi_pool import roi_pool -from .smooth_l1_loss import smooth_l1_loss -from .sigmoid_focal_loss import SigmoidFocalLoss -from .iou_loss import IOULoss -from .scale import Scale -from .deform_conv_v2 import DCN, DCNPooling -from .iou import iou_regress -from .focal_loss import Focal_Loss - -__all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", - "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", - "BatchNorm2d", "FrozenBatchNorm2d", "SigmoidFocalLoss", "IOULoss", - "Scale", "DCN", "DCNPooling", "iou_regress","Focal_Loss"] - diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_resources.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_resources.py deleted file mode 100644 index e0a283fc9873b524bbacb73624721353d82c34ab..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_resources.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import annotations - -from abc import ABCMeta, abstractmethod -from types import TracebackType -from typing import TypeVar - -T = TypeVar("T") - - -class AsyncResource(metaclass=ABCMeta): - """ - Abstract base class for all closeable asynchronous resources. - - Works as an asynchronous context manager which returns the instance itself on enter, and calls - :meth:`aclose` on exit. 
- """ - - async def __aenter__(self: T) -> T: - return self - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - await self.aclose() - - @abstractmethod - async def aclose(self) -> None: - """Close the resource.""" diff --git a/spaces/Datasculptor/MusicGen/tests/modules/__init__.py b/spaces/Datasculptor/MusicGen/tests/modules/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/tests/modules/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/fullablate.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/fullablate.py deleted file mode 100644 index f92d2c514c0b92b3f33653c5b53198c9fd09cb80..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/fullablate.py +++ /dev/null @@ -1,235 +0,0 @@ -import torch, sys, os, argparse, textwrap, numbers, numpy, json, PIL -from torchvision import transforms -from torch.utils.data import TensorDataset -from netdissect.progress import default_progress, post_progress, desc_progress -from netdissect.progress import verbose_progress, print_progress -from netdissect.nethook import edit_layers -from netdissect.zdataset import standard_z_sample -from netdissect.autoeval import autoimport_eval -from netdissect.easydict import EasyDict -from netdissect.modelconfig import create_instrumented_model - -help_epilog = '''\ -Example: - -python -m netdissect.evalablate \ - --segmenter "netdissect.GanImageSegmenter(segvocab='lowres', segsizes=[160,288], segdiv='quad')" \ - --model "proggan.from_pth_file('models/lsun_models/${SCENE}_lsun.pth')" \ - --outdir dissect/dissectdir \ - --classname tree \ - --layer layer4 \ - --size 1000 - -Output layout: -dissectdir/layer5/ablation/mirror-iqr.json -{ class: "mirror", - classnum: 43, - pixel_total: 41342300, - class_pixels: 1234531, - layer: "layer5", - ranking: "mirror-iqr", - ablation_units: [341, 23, 12, 142, 83, ...] - ablation_pixels: [143242, 132344, 429931, ...] 
-} - -''' - -def main(): - # Training settings - def strpair(arg): - p = tuple(arg.split(':')) - if len(p) == 1: - p = p + p - return p - - parser = argparse.ArgumentParser(description='Ablation eval', - epilog=textwrap.dedent(help_epilog), - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('--model', type=str, default=None, - help='constructor for the model to test') - parser.add_argument('--pthfile', type=str, default=None, - help='filename of .pth file for the model') - parser.add_argument('--outdir', type=str, default='dissect', required=True, - help='directory for dissection output') - parser.add_argument('--layer', type=strpair, - help='space-separated list of layer names to edit' + - ', in the form layername[:reportedname]') - parser.add_argument('--classname', type=str, - help='class name to ablate') - parser.add_argument('--metric', type=str, default='iou', - help='ordering metric for selecting units') - parser.add_argument('--unitcount', type=int, default=30, - help='number of units to ablate') - parser.add_argument('--segmenter', type=str, - help='directory containing segmentation dataset') - parser.add_argument('--netname', type=str, default=None, - help='name for network in generated reports') - parser.add_argument('--batch_size', type=int, default=25, - help='batch size for forward pass') - parser.add_argument('--mixed_units', action='store_true', default=False, - help='true to keep alpha for non-zeroed units') - parser.add_argument('--size', type=int, default=200, - help='number of images to test') - parser.add_argument('--no-cuda', action='store_true', default=False, - help='disables CUDA usage') - parser.add_argument('--quiet', action='store_true', default=False, - help='silences console output') - if len(sys.argv) == 1: - parser.print_usage(sys.stderr) - sys.exit(1) - args = parser.parse_args() - - # Set up console output - verbose_progress(not args.quiet) - - # Speed up pytorch - torch.backends.cudnn.benchmark = True - - # Set up CUDA - args.cuda = not args.no_cuda and torch.cuda.is_available() - if args.cuda: - torch.backends.cudnn.benchmark = True - - # Take defaults for model constructor etc from dissect.json settings. - with open(os.path.join(args.outdir, 'dissect.json')) as f: - dissection = EasyDict(json.load(f)) - if args.model is None: - args.model = dissection.settings.model - if args.pthfile is None: - args.pthfile = dissection.settings.pthfile - if args.segmenter is None: - args.segmenter = dissection.settings.segmenter - if args.layer is None: - args.layer = dissection.settings.layers[0] - args.layers = [args.layer] - - # Also load specific analysis - layername = args.layer[1] - if args.metric == 'iou': - summary = dissection - else: - with open(os.path.join(args.outdir, layername, args.metric, - args.classname, 'summary.json')) as f: - summary = EasyDict(json.load(f)) - - # Instantiate generator - model = create_instrumented_model(args, gen=True, edit=True) - if model is None: - print('No model specified') - sys.exit(1) - - # Instantiate model - device = next(model.parameters()).device - input_shape = model.input_shape - - # 4d input if convolutional, 2d input if first layer is linear. - raw_sample = standard_z_sample(args.size, input_shape[1], seed=3).view( - (args.size,) + input_shape[1:]) - dataset = TensorDataset(raw_sample) - - # Create the segmenter - segmenter = autoimport_eval(args.segmenter) - - # Now do the actual work. 
- labelnames, catnames = ( - segmenter.get_label_and_category_names(dataset)) - label_category = [catnames.index(c) if c in catnames else 0 - for l, c in labelnames] - labelnum_from_name = {n[0]: i for i, n in enumerate(labelnames)} - - segloader = torch.utils.data.DataLoader(dataset, - batch_size=args.batch_size, num_workers=10, - pin_memory=(device.type == 'cuda')) - - # Index the dissection layers by layer name. - - # First, collect a baseline - for l in model.ablation: - model.ablation[l] = None - - # For each sort-order, do an ablation - progress = default_progress() - classname = args.classname - classnum = labelnum_from_name[classname] - - # Get iou ranking from dissect.json - iou_rankname = '%s-%s' % (classname, 'iou') - dissect_layer = {lrec.layer: lrec for lrec in dissection.layers} - iou_ranking = next(r for r in dissect_layer[layername].rankings - if r.name == iou_rankname) - - # Get trained ranking from summary.json - rankname = '%s-%s' % (classname, args.metric) - summary_layer = {lrec.layer: lrec for lrec in summary.layers} - ranking = next(r for r in summary_layer[layername].rankings - if r.name == rankname) - - # Get ordering, first by ranking, then break ties by iou. - ordering = [t[2] for t in sorted([(s1, s2, i) - for i, (s1, s2) in enumerate(zip(ranking.score, iou_ranking.score))])] - values = (-numpy.array(ranking.score))[ordering] - if not args.mixed_units: - values[...] = 1 - - ablationdir = os.path.join(args.outdir, layername, 'fullablation') - measurements = measure_full_ablation(segmenter, segloader, - model, classnum, layername, - ordering[:args.unitcount], values[:args.unitcount]) - measurements = measurements.cpu().numpy().tolist() - os.makedirs(ablationdir, exist_ok=True) - with open(os.path.join(ablationdir, '%s.json'%rankname), 'w') as f: - json.dump(dict( - classname=classname, - classnum=classnum, - baseline=measurements[0], - layer=layername, - metric=args.metric, - ablation_units=ordering, - ablation_values=values.tolist(), - ablation_effects=measurements[1:]), f) - -def measure_full_ablation(segmenter, loader, model, classnum, layer, - ordering, values): - ''' - Quick and easy counting of segmented pixels reduced by ablating units. 
- ''' - progress = default_progress() - device = next(model.parameters()).device - feature_units = model.feature_shape[layer][1] - feature_shape = model.feature_shape[layer][2:] - repeats = len(ordering) - total_scores = torch.zeros(repeats + 1) - print(ordering) - print(values.tolist()) - with torch.no_grad(): - for l in model.ablation: - model.ablation[l] = None - for i, [ibz] in enumerate(progress(loader)): - ibz = ibz.cuda() - for num_units in progress(range(len(ordering) + 1)): - ablation = torch.zeros(feature_units, device=device) - ablation[ordering[:num_units]] = torch.tensor( - values[:num_units]).to(ablation.device, ablation.dtype) - model.ablation[layer] = ablation - tensor_images = model(ibz) - seg = segmenter.segment_batch(tensor_images, downsample=2) - mask = (seg == classnum).max(1)[0] - total_scores[num_units] += mask.sum().float().cpu() - return total_scores - -def count_segments(segmenter, loader, model): - total_bincount = 0 - data_size = 0 - progress = default_progress() - for i, batch in enumerate(progress(loader)): - tensor_images = model(z_batch.to(device)) - seg = segmenter.segment_batch(tensor_images, downsample=2) - bc = (seg + index[:, None, None, None] * self.num_classes).view(-1 - ).bincount(minlength=z_batch.shape[0] * self.num_classes) - data_size += seg.shape[0] * seg.shape[2] * seg.shape[3] - total_bincount += batch_label_counts.float().sum(0) - normalized_bincount = total_bincount / data_size - return normalized_bincount - -if __name__ == '__main__': - main() diff --git a/spaces/ECCV2022/bytetrack/yolox/evaluators/evaluation.py b/spaces/ECCV2022/bytetrack/yolox/evaluators/evaluation.py deleted file mode 100644 index fd72f82adf4bacc73b564a855ce10082d89f76af..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/yolox/evaluators/evaluation.py +++ /dev/null @@ -1,200 +0,0 @@ -import os -import numpy as np -import copy -import motmetrics as mm -mm.lap.default_solver = 'lap' - - -class Evaluator(object): - - def __init__(self, data_root, seq_name, data_type): - self.data_root = data_root - self.seq_name = seq_name - self.data_type = data_type - - self.load_annotations() - self.reset_accumulator() - - def load_annotations(self): - assert self.data_type == 'mot' - - gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') - self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True) - self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True) - - def reset_accumulator(self): - self.acc = mm.MOTAccumulator(auto_id=True) - - def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False): - # results - trk_tlwhs = np.copy(trk_tlwhs) - trk_ids = np.copy(trk_ids) - - # gts - gt_objs = self.gt_frame_dict.get(frame_id, []) - gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2] - - # ignore boxes - ignore_objs = self.gt_ignore_frame_dict.get(frame_id, []) - ignore_tlwhs = unzip_objs(ignore_objs)[0] - - # remove ignored results - keep = np.ones(len(trk_tlwhs), dtype=bool) - iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5) - if len(iou_distance) > 0: - match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) - match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) - match_ious = iou_distance[match_is, match_js] - - match_js = np.asarray(match_js, dtype=int) - match_js = match_js[np.logical_not(np.isnan(match_ious))] - keep[match_js] = False - trk_tlwhs = trk_tlwhs[keep] - trk_ids = trk_ids[keep] - #match_is, match_js = 
mm.lap.linear_sum_assignment(iou_distance) - #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) - #match_ious = iou_distance[match_is, match_js] - - #match_js = np.asarray(match_js, dtype=int) - #match_js = match_js[np.logical_not(np.isnan(match_ious))] - #keep[match_js] = False - #trk_tlwhs = trk_tlwhs[keep] - #trk_ids = trk_ids[keep] - - # get distance matrix - iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5) - - # acc - self.acc.update(gt_ids, trk_ids, iou_distance) - - if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'): - events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics - else: - events = None - return events - - def eval_file(self, filename): - self.reset_accumulator() - - result_frame_dict = read_results(filename, self.data_type, is_gt=False) - #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))) - frames = sorted(list(set(result_frame_dict.keys()))) - for frame_id in frames: - trk_objs = result_frame_dict.get(frame_id, []) - trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2] - self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False) - - return self.acc - - @staticmethod - def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')): - names = copy.deepcopy(names) - if metrics is None: - metrics = mm.metrics.motchallenge_metrics - metrics = copy.deepcopy(metrics) - - mh = mm.metrics.create() - summary = mh.compute_many( - accs, - metrics=metrics, - names=names, - generate_overall=True - ) - - return summary - - @staticmethod - def save_summary(summary, filename): - import pandas as pd - writer = pd.ExcelWriter(filename) - summary.to_excel(writer) - writer.save() - - - - - -def read_results(filename, data_type: str, is_gt=False, is_ignore=False): - if data_type in ('mot', 'lab'): - read_fun = read_mot_results - else: - raise ValueError('Unknown data type: {}'.format(data_type)) - - return read_fun(filename, is_gt, is_ignore) - - -""" -labels={'ped', ... % 1 -'person_on_vhcl', ... % 2 -'car', ... % 3 -'bicycle', ... % 4 -'mbike', ... % 5 -'non_mot_vhcl', ... % 6 -'static_person', ... % 7 -'distractor', ... % 8 -'occluder', ... % 9 -'occluder_on_grnd', ... %10 -'occluder_full', ... % 11 -'reflection', ... % 12 -'crowd' ... 
% 13 -}; -""" - - -def read_mot_results(filename, is_gt, is_ignore): - valid_labels = {1} - ignore_labels = {2, 7, 8, 12} - results_dict = dict() - if os.path.isfile(filename): - with open(filename, 'r') as f: - for line in f.readlines(): - linelist = line.split(',') - if len(linelist) < 7: - continue - fid = int(linelist[0]) - if fid < 1: - continue - results_dict.setdefault(fid, list()) - - box_size = float(linelist[4]) * float(linelist[5]) - - if is_gt: - if 'MOT16-' in filename or 'MOT17-' in filename: - label = int(float(linelist[7])) - mark = int(float(linelist[6])) - if mark == 0 or label not in valid_labels: - continue - score = 1 - elif is_ignore: - if 'MOT16-' in filename or 'MOT17-' in filename: - label = int(float(linelist[7])) - vis_ratio = float(linelist[8]) - if label not in ignore_labels and vis_ratio >= 0: - continue - else: - continue - score = 1 - else: - score = float(linelist[6]) - - #if box_size > 7000: - #if box_size <= 7000 or box_size >= 15000: - #if box_size < 15000: - #continue - - tlwh = tuple(map(float, linelist[2:6])) - target_id = int(linelist[1]) - - results_dict[fid].append((tlwh, target_id, score)) - - return results_dict - - -def unzip_objs(objs): - if len(objs) > 0: - tlwhs, ids, scores = zip(*objs) - else: - tlwhs, ids, scores = [], [], [] - tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) - - return tlwhs, ids, scores \ No newline at end of file diff --git a/spaces/EDGAhab/VITS-Aatrox-AI/commons.py b/spaces/EDGAhab/VITS-Aatrox-AI/commons.py deleted file mode 100644 index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/VITS-Aatrox-AI/commons.py +++ /dev/null @@ -1,161 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/Egrt/GCycleGAN/cyclegan.py b/spaces/Egrt/GCycleGAN/cyclegan.py deleted file mode 100644 index 285de0eccc9c38ffefa98430c61fc950f8094fa9..0000000000000000000000000000000000000000 --- a/spaces/Egrt/GCycleGAN/cyclegan.py +++ /dev/null @@ -1,106 +0,0 @@ -import numpy as np -import torch -from PIL import Image -from torch import nn - -from nets.cyclegan import Generator -from utils.utils import (cvtColor, postprocess_output, preprocess_input, - resize_image, show_config) - - -class CYCLEGAN(object): - _defaults = { - #-----------------------------------------------# - # model_path指向logs文件夹下的权值文件 - #-----------------------------------------------# - "model_path" : 'model_data/G_model_B2A_last_epoch_weights.pth', - #-----------------------------------------------# - # 输入图像大小的设置 - #-----------------------------------------------# - "input_shape" : [112, 112], - #-------------------------------# - # 是否进行不失真的resize - #-------------------------------# - "letterbox_image" : True, - #-------------------------------# - # 是否使用Cuda - # 没有GPU可以设置成False - #-------------------------------# - "cuda" : False, - } - - #---------------------------------------------------# - # 初始化CYCLEGAN - #---------------------------------------------------# - def __init__(self, **kwargs): - self.__dict__.update(self._defaults) - for name, value in kwargs.items(): - setattr(self, name, value) - self._defaults[name] = value - self.generate() - - show_config(**self._defaults) - - def generate(self): - #----------------------------------------# - # 创建GAN模型 - #----------------------------------------# - self.net = Generator(upscale=1, img_size=tuple(self.input_shape), - window_size=7, img_range=1., depths=[3, 3, 3, 3], - embed_dim=60, num_heads=[3, 3, 3, 3], mlp_ratio=1, upsampler='1conv').eval() - - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.net.load_state_dict(torch.load(self.model_path, map_location=device)) - self.net = self.net.eval() - print('{} model loaded.'.format(self.model_path)) - - if self.cuda: - self.net = nn.DataParallel(self.net) - self.net = self.net.cuda() - - #---------------------------------------------------# - # 生成1x1的图片 - #---------------------------------------------------# - def detect_image(self, image): - #---------------------------------------------------------# - # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。 - # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB - #---------------------------------------------------------# - image = cvtColor(image) - #---------------------------------------------------------# - # 给图像增加灰条,实现不失真的resize - # 也可以直接resize进行识别 - #---------------------------------------------------------# - image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image) - #---------------------------------------------------------# - # 添加上batch_size维度 - #---------------------------------------------------------# - image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0) - - with torch.no_grad(): - images = 
torch.from_numpy(image_data) - if self.cuda: - images = images.cuda() - - #---------------------------------------------------# - # 图片传入网络进行预测 - #---------------------------------------------------# - pr = self.net(images)[0] - #---------------------------------------------------# - # 转为numpy - #---------------------------------------------------# - pr = pr.permute(1, 2, 0).cpu().numpy() - - #--------------------------------------# - # 将灰条部分截取掉 - #--------------------------------------# - if nw is not None: - pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \ - int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)] - - - image = postprocess_output(pr) - image = np.clip(image, 0, 255) - image = Image.fromarray(np.uint8(image)) - - return image diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/README.md b/spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/README.md deleted file mode 100644 index c6ef17e7659558a4f41834f4614d58caddcbe208..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Mask R-CNN - -> [Mask R-CNN](https://arxiv.org/abs/1703.06870) - - - -## Abstract - -We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. - -
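As a point of reference for how such a detector is wired up in this config-driven codebase, here is a representative (not repo-exact) sketch of a Mask R-CNN config in the mmdetection style; the `_base_` fragment paths are assumptions, and the actual files behind this README (e.g. `mask_rcnn_r50_fpn_160e_ctw1500.py`) differ in dataset and schedule.

```python
# Representative mmdetection-style config: model, dataset, schedule and runtime
# come from shared _base_ fragments and are then selectively overridden.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py',
]

# Example override: adapt the box/mask heads to a single "text" class.
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=1),
        mask_head=dict(num_classes=1)))
```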
    - -
    - -## Results and models - -### CTW1500 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :----------------------------------------------------------: | :--------------: | :-----------: | :----------: | :-----: | :-------: | :----: | :-------: | :---: | :-------------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py) | ImageNet | CTW1500 Train | CTW1500 Test | 160 | 1600 | 0.753 | 0.712 | 0.732 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.log.json) | - -### ICDAR2015 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :--------------------------------------------------------: | :--------------: | :-------------: | :------------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py) | ImageNet | ICDAR2015 Train | ICDAR2015 Test | 160 | 1920 | 0.783 | 0.872 | 0.825 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.log.json) | - -### ICDAR2017 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :---------------------------------------------------------: | :--------------: | :-------------: | :-----------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017.py) | ImageNet | ICDAR2017 Train | ICDAR2017 Val | 160 | 1600 | 0.754 | 0.827 | 0.789 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.log.json) | - -```{note} -We tuned parameters with the techniques in [Pyramid Mask Text Detector](https://arxiv.org/abs/1903.11800) -``` - -## Citation - -```bibtex -@INPROCEEDINGS{8237584, - author={K. {He} and G. {Gkioxari} and P. {Dollár} and R. {Girshick}}, - booktitle={2017 IEEE International Conference on Computer Vision (ICCV)}, - title={Mask R-CNN}, - year={2017}, - pages={2980-2988}, - doi={10.1109/ICCV.2017.322}} -``` diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textdet/textsnake/README.md b/spaces/EuroPython2022/mmocr-demo/configs/textdet/textsnake/README.md deleted file mode 100644 index be7f3fe7bb15f5610669e937179adca7210039b8..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/textdet/textsnake/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Textsnake - -> [TextSnake: A Flexible Representation for Detecting Text of Arbitrary Shapes](https://arxiv.org/abs/1807.01544) - - - -## Abstract - -Driven by deep neural networks and large scale datasets, scene text detection methods have progressed substantially over the past years, continuously refreshing the performance records on various standard benchmarks. 
However, limited by the representations (axis-aligned rectangles, rotated rectangles or quadrangles) adopted to describe text, existing methods may fall short when dealing with much more free-form text instances, such as curved text, which are actually very common in real-world scenarios. To tackle this problem, we propose a more flexible representation for scene text, termed as TextSnake, which is able to effectively represent text instances in horizontal, oriented and curved forms. In TextSnake, a text instance is described as a sequence of ordered, overlapping disks centered at symmetric axes, each of which is associated with potentially variable radius and orientation. Such geometry attributes are estimated via a Fully Convolutional Network (FCN) model. In experiments, the text detector based on TextSnake achieves state-of-the-art or comparable performance on Total-Text and SCUT-CTW1500, the two newly published benchmarks with special emphasis on curved text in natural images, as well as the widely-used datasets ICDAR 2015 and MSRA-TD500. Specifically, TextSnake outperforms the baseline on Total-Text by more than 40% in F-measure. - -
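To make the representation above concrete, here is a purely illustrative sketch of a disk-sequence text instance. The class and field names are assumptions for illustration only and do not come from the TextSnake code or this repo.

```python
from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class Disk:
    center: Tuple[float, float]  # point on the text instance's symmetric axis
    radius: float                # local half-width of the text region
    theta: float                 # local orientation of the axis, in radians


# A TextSnake-style text instance: an ordered sequence of overlapping disks
# whose union sweeps out an arbitrarily curved text region.
TextInstance = List[Disk]

instance: TextInstance = [
    Disk(center=(10.0, 20.0), radius=6.0, theta=0.00),
    Disk(center=(16.0, 21.0), radius=6.5, theta=0.15),
    Disk(center=(22.0, 23.0), radius=7.0, theta=0.30),
]
```

Per the abstract, the FCN predicts exactly these geometry attributes (axis points, radius, orientation), which are then grouped into ordered sequences like the one above.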
    - -
    - -## Results and models - -### CTW1500 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :----------------------------------------------------------: | :--------------: | :-----------: | :----------: | :-----: | :-------: | :----: | :-------: | :---: | :-------------------------------------------------------------: | -| [TextSnake](/configs/textdet/textsnake/textsnake_r50_fpn_unet_600e_ctw1500.py) | ImageNet | CTW1500 Train | CTW1500 Test | 1200 | 736 | 0.795 | 0.840 | 0.817 | [model](https://download.openmmlab.com/mmocr/textdet/textsnake/textsnake_r50_fpn_unet_1200e_ctw1500-27f65b64.pth) \| [log](<>) | - -## Citation - -```bibtex -@article{long2018textsnake, - title={TextSnake: A Flexible Representation for Detecting Text of Arbitrary Shapes}, - author={Long, Shangbang and Ruan, Jiaqiang and Zhang, Wenjie and He, Xin and Wu, Wenhao and Yao, Cong}, - booktitle={ECCV}, - pages={20-36}, - year={2018} -} -``` diff --git a/spaces/EveryPizza/Cartoony-Gradio-Theme/README.md b/spaces/EveryPizza/Cartoony-Gradio-Theme/README.md deleted file mode 100644 index 1ee9a5e5361525bbeb976c1b99d489ebb8919f0f..0000000000000000000000000000000000000000 --- a/spaces/EveryPizza/Cartoony-Gradio-Theme/README.md +++ /dev/null @@ -1,17 +0,0 @@ - ---- -tags: [gradio-theme] -title: Cartoony-Gradio-Theme -colorFrom: orange -colorTo: purple -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- -# Cartoony-Gradio-Theme -## Description -Add a description of this theme here! -## Contributions -Thanks to [@EveryPizza](https://huggingface.co/EveryPizza) for adding this gradio theme! diff --git a/spaces/Felix123456/bingo/src/lib/bots/bing/utils.ts b/spaces/Felix123456/bingo/src/lib/bots/bing/utils.ts deleted file mode 100644 index 64b4b96452d125346b0fc4436b5f7c18c962df0b..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/lib/bots/bing/utils.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { ChatResponseMessage, BingChatResponse } from './types' - -export function convertMessageToMarkdown(message: ChatResponseMessage): string { - if (message.messageType === 'InternalSearchQuery') { - return message.text - } - for (const card of message.adaptiveCards??[]) { - for (const block of card.body) { - if (block.type === 'TextBlock') { - return block.text - } - } - } - return '' -} - -const RecordSeparator = String.fromCharCode(30) - -export const websocketUtils = { - packMessage(data: any) { - return `${JSON.stringify(data)}${RecordSeparator}` - }, - unpackMessage(data: string | ArrayBuffer | Blob) { - if (!data) return {} - return data - .toString() - .split(RecordSeparator) - .filter(Boolean) - .map((s) => { - try { - return JSON.parse(s) - } catch (e) { - return {} - } - }) - }, -} - -export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise { - const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`, - { - method: 'HEAD', - headers, - redirect: 'manual' - }, - ); - - if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) { - throw new Error('请求异常,请检查 cookie 是否有效') - } - - const resultId = RegExp.$1; - let count = 0 - const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`; - - do { - await sleep(3000); - const 
content = await fetch(imageThumbUrl, { headers, method: 'GET' }) - - // @ts-ignore - if (content.headers.get('content-length') > 1) { - const text = await content.text() - return (text?.match(/ target?.split('src="').pop()?.replace(/&/g, '&')) - .map(img => `![${prompt}](${img})`).join(' ') - } - } while(count ++ < 10); -} - - -export async function* streamAsyncIterable(stream: ReadableStream) { - const reader = stream.getReader() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - return - } - yield value - } - } finally { - reader.releaseLock() - } -} - -export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) - diff --git a/spaces/Felix123456/bingo/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/Felix123456/bingo/src/lib/hooks/use-copy-to-clipboard.tsx deleted file mode 100644 index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/lib/hooks/use-copy-to-clipboard.tsx +++ /dev/null @@ -1,33 +0,0 @@ -'use client' - -import * as React from 'react' - -export interface useCopyToClipboardProps { - timeout?: number -} - -export function useCopyToClipboard({ - timeout = 2000 -}: useCopyToClipboardProps) { - const [isCopied, setIsCopied] = React.useState(false) - - const copyToClipboard = (value: string) => { - if (typeof window === 'undefined' || !navigator.clipboard?.writeText) { - return - } - - if (!value) { - return - } - - navigator.clipboard.writeText(value).then(() => { - setIsCopied(true) - - setTimeout(() => { - setIsCopied(false) - }, timeout) - }) - } - - return { isCopied, copyToClipboard } -} diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/metrics/metric_util.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/metrics/metric_util.py deleted file mode 100644 index 4d18f0f7816431bed6af9d58319c6435bdf5c971..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/metrics/metric_util.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np - -from basicsr.utils.matlab_functions import bgr2ycbcr - - -def reorder_image(img, input_order='HWC'): - """Reorder images to 'HWC' order. - - If the input_order is (h, w), return (h, w, 1); - If the input_order is (c, h, w), return (h, w, c); - If the input_order is (h, w, c), return as it is. - - Args: - img (ndarray): Input image. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - If the input image shape is (h, w), input_order will not have - effects. Default: 'HWC'. - - Returns: - ndarray: reordered image. - """ - - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'") - if len(img.shape) == 2: - img = img[..., None] - if input_order == 'CHW': - img = img.transpose(1, 2, 0) - return img - - -def to_y_channel(img): - """Change to Y channel of YCbCr. - - Args: - img (ndarray): Images with range [0, 255]. - - Returns: - (ndarray): Images with range [0, 255] (float type) without round. - """ - img = img.astype(np.float32) / 255. - if img.ndim == 3 and img.shape[2] == 3: - img = bgr2ycbcr(img, y_only=True) - img = img[..., None] - return img * 255. 
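For reference, a minimal usage sketch of the two helpers above, assuming `basicsr` is installed so the module path matches this file; shapes and value ranges follow the docstrings.

```python
import numpy as np

from basicsr.metrics.metric_util import reorder_image, to_y_channel

# A CHW, BGR-ordered image with values in [0, 255].
chw_img = (np.random.rand(3, 64, 64) * 255).astype(np.float32)

hwc_img = reorder_image(chw_img, input_order='CHW')  # -> shape (64, 64, 3)
y_img = to_y_channel(hwc_img)                        # -> shape (64, 64, 1), Y channel, float in [0, 255]

print(hwc_img.shape, y_img.shape)
```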
diff --git a/spaces/Fox1997/vits-uma-genshin-honkai/README.md b/spaces/Fox1997/vits-uma-genshin-honkai/README.md deleted file mode 100644 index 1c0aa069bfd980b6b45bb2bf62ff74bd9b0b61c2..0000000000000000000000000000000000000000 --- a/spaces/Fox1997/vits-uma-genshin-honkai/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -license: apache-2.0 -title: ' vits-uma-genshin-honkai' -sdk: gradio -sdk_version: 3.7 -emoji: 🐨 -colorTo: yellow -pinned: false -app_file: app.py -duplicated_from: ikechan8370/vits-uma-genshin-honkai ---- diff --git a/spaces/FritsLyneborg/kunstnerfrits/src/dalle_mini/model/utils.py b/spaces/FritsLyneborg/kunstnerfrits/src/dalle_mini/model/utils.py deleted file mode 100644 index 99e0686e38b546cf474b2a7f6a2d3f91e34b9a24..0000000000000000000000000000000000000000 --- a/spaces/FritsLyneborg/kunstnerfrits/src/dalle_mini/model/utils.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import tempfile -from pathlib import Path - -import wandb - - -class PretrainedFromWandbMixin: - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - """ - Initializes from a wandb artifact or delegates loading to the superclass. - """ - with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies - if ":" in pretrained_model_name_or_path and not os.path.isdir( - pretrained_model_name_or_path - ): - # wandb artifact - if wandb.run is not None: - artifact = wandb.run.use_artifact(pretrained_model_name_or_path) - else: - artifact = wandb.Api().artifact(pretrained_model_name_or_path) - pretrained_model_name_or_path = artifact.download(tmp_dir) - - return super(PretrainedFromWandbMixin, cls).from_pretrained( - pretrained_model_name_or_path, *model_args, **kwargs - ) diff --git a/spaces/Giuliano/image_classification/app.py b/spaces/Giuliano/image_classification/app.py deleted file mode 100644 index 93b66b4dee2a87b6b18193c1008f01ec0aeb957f..0000000000000000000000000000000000000000 --- a/spaces/Giuliano/image_classification/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import gradio as gr - -iface = gr.Interface.load("huggingface/Giuliano/vit-lung-cancer", -) - -iface.launch() \ No newline at end of file diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/configs/__init__.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 68c57dfb242c6681cda6ead27929d6737c74fc45..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,75 +0,0 @@ -_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe'), - roi_head=dict( - bbox_head=dict( - bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - assigner=dict( - 
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), - sampler=dict(num=256))), - test_cfg=dict(rcnn=dict(score_thr=1e-3))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=300), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=300), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='ToTensor', keys=['proposals']), - dict( - type='ToDataContainer', - fields=[dict(key='proposals', stack=False)]), - dict(type='Collect', keys=['img', 'proposals']), - ]) -] -data = dict( - train=dict( - proposal_file=data_root + - 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl', - pipeline=train_pipeline), - val=dict( - proposal_file=data_root + - 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl', - pipeline=test_pipeline), - test=dict( - proposal_file=data_root + - 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl', - pipeline=test_pipeline)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py deleted file mode 100644 index f76040434f1ff07608c83202f779dfacfe91c323..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py +++ /dev/null @@ -1,32 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - pretrained='torchvision://resnet50', - style='pytorch'))) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/__init__.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/__init__.py deleted file mode 100644 index 9b18b30a258c32283cbfc03ba01781a19fd993c1..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 
-from .cityscapes import CityscapesDataset -from .coco import CocoDataset -from .custom import CustomDataset -from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, - RepeatDataset) -from .deepfashion import DeepFashionDataset -from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset -from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler -from .utils import (NumClassCheckHook, get_loading_pipeline, - replace_ImageToTensor) -from .voc import VOCDataset -from .wider_face import WIDERFaceDataset -from .xml_style import XMLDataset - -__all__ = [ - 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', - 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', - 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', - 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', - 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', - 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', - 'NumClassCheckHook' -] diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/README.md b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/README.md deleted file mode 100644 index 655a845c6ae177c5e18445754f2b4daf823c5c4b..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Dual Attention Network for Scene Segmentation - -## Introduction - - - -```latex -@article{fu2018dual, - title={Dual Attention Network for Scene Segmentation}, - author={Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu}, - booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year={2019} -} -``` - -## Results and models - -### Cityscapes - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| DANet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.66 | 78.74 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324.log.json) | -| DANet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.99 | 80.52 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831.log.json) | -| DANet | R-50-D8 | 769x769 | 40000 | 8.8 | 1.56 | 78.88 | 80.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703.log.json) | -| DANet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.07 | 79.88 | 81.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717.log.json) | -| DANet | R-50-D8 | 512x1024 | 80000 | - | - | 79.34 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029.log.json) | -| DANet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918.log.json) | -| DANet | R-50-D8 | 769x769 | 80000 | - | - | 79.27 | 80.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954.log.json) | -| DANet | R-101-D8 | 769x769 | 80000 | - | - | 80.47 | 82.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918.log.json) | - -### ADE20K - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | 
--------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| DANet | R-50-D8 | 512x512 | 80000 | 11.5 | 21.20 | 41.66 | 42.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125.log.json) | -| DANet | R-101-D8 | 512x512 | 80000 | 15 | 14.18 | 43.64 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126.log.json) | -| DANet | R-50-D8 | 512x512 | 160000 | - | - | 42.45 | 43.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340.log.json) | -| DANet | R-101-D8 | 512x512 | 160000 | - | - | 44.17 | 45.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348.log.json) | - -### Pascal VOC 2012 + Aug - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| DANet | R-50-D8 | 512x512 | 20000 | 6.5 | 20.94 | 74.45 | 75.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026.log.json) | -| DANet | R-101-D8 | 512x512 | 20000 | 9.9 | 13.76 | 76.02 | 77.23 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026.log.json) | -| DANet | R-50-D8 | 512x512 | 40000 | - | - | 76.37 | 77.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526.log.json) | -| DANet | R-101-D8 | 512x512 | 40000 | - | - | 76.51 | 77.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031.log.json) | diff --git a/spaces/Groq/mlagility/app.py b/spaces/Groq/mlagility/app.py deleted file mode 100644 index 6eabdd39c53d5fefee6341521ea110e9f789521e..0000000000000000000000000000000000000000 --- a/spaces/Groq/mlagility/app.py +++ /dev/null @@ -1,230 +0,0 @@ -from os import listdir -from os.path import isfile, join -import pandas as pd -import streamlit as st # pylint: disable=import-error -import graphs -from streamlit_helpers import add_filter, slider_filter, Collapsable - -st.set_page_config( - page_title="MLAgility Tracker", - page_icon="⚡", - layout="wide", -) - -# dashboard title -st.title("MLAgility Tracker ⚡") - -st.warning( - ( - "MLAgility is under active development and we are currently working on a list of critical data " - "validation tasks available at github.com/groq/mlagility/labels/validation. We are sharing this " - "dashboard and the data within for the sole purpose of gathering early feedback. See our FAQ below " - "for more details about license and liability. For feedback please email " - "{jfowers,dhnoronha,rsivakumar}@groq.com." - ), - icon="⚠️", -) - - -def add_faq() -> None: - """ - Displays FAQ using Collapsable sections - """ - faq = Collapsable() - faq.add_section( - "How is MLAgility different from MLPerf?", - ( - "Deep learning pioneers have been judging their progress with the Machine Learning " - "Performance (MLPerf) inference benchmark, but have found that the corpus of models " - "is small enough that it allows vendors to primarily compete by hand-optimizing " - "kernels. MLAgility offers a complementary approach to MLPerf by examining the " - "capability of vendors to provide turnkey solutions to a larger corpus of " - "off-the-shelf models. 
By providing a workflow that is representative of the " - "mass adoption customer on a variety of ML accelerators and effectively disallowing " - "hand-crafted kernels, MLAgility bridges the gap between MLPerf and the mass adoption " - "of hardware acceleration." - ), - ) - faq.add_section( - "Why now for MLAgility?", - ( - "Deep learning algorithms and their associated DL hardware accelerators are " - "transitioning from early adoption into mass adoption. Production DL is now " - "becoming available to the masses, with a desire to customize models to tackle " - "their specific problems, and then take the path of least resistance into " - "production. A market for turnkey solutions, starting with a model as input and " - "provision a cost- and latency-effective acceleration solution, often in the cloud, " - "as output, has emerged." - ), - ) - faq.add_section( - "Which tool was used to generate those results?", - ( - "All MLAgility results have been generated using the benchit tool v1.0.0, which is part " - "of the MLAgility Github Repository. You can learn more about it " - 'here.' - ), - ) - faq.add_section( - "What is the experimental setup for each of the devices?", - [ - "x86: Intel(R) Xeon(R) X40 CPU @ 2.00GHz on Google Cloud (custom: n2, 80 vCPU, 64.00 GiB) and OnnxRuntime version 1.14.0.", - "nvidia: NVIDIA A100 40GB on Google Cloud (a2-highgpu-1g) and TensorRT version 22.12-py3.", - "groq: GroqChip 1 on selfhosted GroqNode server, GroqFlow version 3.0.2 TestPyPI package, and a pre-release of GroqWare™ Suite version 0.10.0.", - ( - "You can find more details about the methodology " - 'here.' - ), - ], - ) - faq.add_section( - "What are the current key limitations of those results?", - [ - ( - "Groq's latency is computed using GroqModel.estimate_latency(), which takes" - " into account deterministic compute time and estimates an ideal runtime with" - " ideal I/O time. It does not take into account runtime performance." - ), - ( - "Results currently only represent batch 1 performance on a limited number of models, " - "devices, vendors, and runtimes. You can learn more about future directions by reading " - 'the "What are the future directions of MLAgility?" FAQ section.' - ), - ( - "Results are currently being validated. You can have a look at our current validation " - "tasks and other limitations " - 'here.' - ), - ], - ) - faq.add_section( - "What are the future directions of MLAgility?", - [ - "Include additional classes of models (e.g. LLMs, GNNs, DLRMs).", - "Perform experiments that include sweeps over batch and input sizes.", - "Increase the number of devices from existing vendors (e.g. T4, A10, and H100).", - "Include devices from additional vendors (e.g. ARM, and AMD)." - "Include the number of runtimes supported (e.g. ORT and PyTorch for CUDA, PyTorch for x86).", - ], - ) - faq.add_section( - "Who runs MLAgility?", - ( - "MLAgility is currently maintained by the following individuals (in alphabetical order): " - "Daniel Holanda Noronha, Jeremy Fowers, Kalin Ovtcharov, and Ramakrishnan Sivakumar. We are actively seeking collaborators from across the industry." - ), - ) - faq.add_section( - "License and Liability", - ( - 'THE MLAGILITY BENCHMARK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ' - "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, " - "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE " - "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER " - "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, " - "OUT OF OR IN CONNECTION WITH THE BENCHMARK OR THE USE OR OTHER DEALINGS IN THE " - "BENCHMARK. Read more about it " - 'here.' - ), - ) - - faq.deploy() - - -# Add all filters to sidebar -with st.sidebar: - - st.markdown("# Filters") - - # Get all reports of a given test type - REPORT_FOLDER = "reports" - reports = sorted( - [f for f in listdir(REPORT_FOLDER) if isfile(join(REPORT_FOLDER, f))] - ) - - # Select and read a report - selected_report = st.selectbox("Test date", reports, index=len(reports) - 1) - selected_report_idx = reports.index(selected_report) - report = pd.read_csv(f"{REPORT_FOLDER}/{selected_report}") - - # Convert int parameters to int/float - for p in ["groq_chips_used", "params"]: - report[p] = report[p].replace("-", 0).astype("int64") - - # Add parameter filter - st.markdown("#### Parameters") - - report = slider_filter( - [report], "Select a range parameters (in millions)", filter_by="params" - )[0] - - # Add author filter - report = add_filter( - [report], - "Origin", - label="author", - num_cols=2, - )[0] - - # Add task filter - report = add_filter([report], "Tasks", label="task", options=None)[0] - - -st.markdown("## Summary Results") - -graphs.device_funnel(report) - -st.markdown("""#### Benchmark results""") -baseline = st.selectbox("Baseline", ("x86", "nvidia", "groq")) -graphs.speedup_text_summary(report, baseline) -graphs.speedup_bar_chart(report, baseline) - -cols = st.columns(2) -with cols[0]: - st.markdown("""#### Workload origin""") - graphs.workload_origin(report) - -with cols[1]: - st.markdown("""#### Parameter Size Distribution""") - graphs.parameter_histogram(report, show_assembled=False) - -# FAQ Block -st.markdown("""## About this workload analysis (FAQ)""") -add_faq() - -# Detailed data view (table) -st.markdown("## Detailed Data View") - -# Add columns that do not exist yet -report["gpu_chips_used"] = 1 -report["cpu_chips_used"] = 1 - - -# Using 3 significant digits -report["groq_estimated_latency"] = [ - "-" if x == "-" else "{:.3f}".format(float(x)) - for x in report["groq_estimated_latency"] -] -report["nvidia_latency"] = [ - "-" if x == "-" else "{:.3f}".format(float(x)) for x in report["nvidia_latency"] -] -report["x86_latency"] = [ - "-" if x == "-" else "{:.3f}".format(float(x)) for x in report["x86_latency"] -] - -renamed_cols = { - "model_name": "Model Name", - "author": "Source", - "params": "Parameters", - "groq_estimated_latency": "GroqChip 1: Latency (ms)", - "nvidia_latency": "NVIDIA A100-PCIE-40GB: Latency (ms)", - "x86_latency": "Intel(R) Xeon(R) x40 CPU: Latency (ms)", - "groq_chips_used": "GroqChip 1: Chips Used", - "gpu_chips_used": "NVIDIA A100-PCIE-40GB: Chips Used", - "cpu_chips_used": "Intel(R) Xeon(R) x40 CPU: Chips Used", -} - -report.rename(columns=renamed_cols, inplace=True) -selected_cols = list(renamed_cols.values()) - -graphs.results_table(report[selected_cols]) # pylint: disable=unsubscriptable-object diff --git a/spaces/HarryLee/eCommerceImageCaptioning/ofa_module/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/ofa_module/__init__.py deleted file mode 100644 index 30b147a95464b55f55a0dd1dc8555ca69ebec358..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/ofa_module/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -import data -import models -import tasks -import criterions -import utils \ No 
newline at end of file diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/scripts/glow/train_glow.sh b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/scripts/glow/train_glow.sh deleted file mode 100644 index f12939d5d4563de555bf49408fa7a27397e0dae3..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/scripts/glow/train_glow.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -gender='male' - -config='../../config/glow/'$gender'.json' -modeldir='../../checkpoints/glow/'$gender -logdir='../../logs/glow/'$gender -init=1 # 1 if start from scratch. 0 if start from last checkpoint - - -#################################################### - -if [[ $init -eq 1 ]] -then - python ../../src/glow_tts/init.py -c $config -m $modeldir -l $logdir -fi -python ../../src/glow_tts/train.py -c $config -m $modeldir -l $logdir diff --git a/spaces/Harveenchadha/oiTrans/inference/__init__.py b/spaces/Harveenchadha/oiTrans/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/Vocabulary.py b/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/Vocabulary.py deleted file mode 100644 index 3b79c96dbf5200852ece221cdd9a60bfbf0865ab..0000000000000000000000000000000000000000 --- a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/Vocabulary.py +++ /dev/null @@ -1,78 +0,0 @@ -__author__ = 'Taneem Jan, taneemishere.github.io' - -import sys -import numpy as np - -START_TOKEN = "" -END_TOKEN = "" -PLACEHOLDER = " " -SEPARATOR = '->' - - -class Vocabulary: - def __init__(self): - self.binary_vocabulary = {} - self.vocabulary = {} - self.token_lookup = {} - self.size = 0 - - self.append(START_TOKEN) - self.append(END_TOKEN) - self.append(PLACEHOLDER) - - def append(self, token): - if token not in self.vocabulary: - self.vocabulary[token] = self.size - self.token_lookup[self.size] = token - self.size += 1 - - def create_binary_representation(self): - if sys.version_info >= (3,): - items = self.vocabulary.items() - else: - items = self.vocabulary.iteritems() - for key, value in items: - binary = np.zeros(self.size) - binary[value] = 1 - self.binary_vocabulary[key] = binary - - def get_serialized_binary_representation(self): - if len(self.binary_vocabulary) == 0: - self.create_binary_representation() - - string = "" - if sys.version_info >= (3,): - items = self.binary_vocabulary.items() - else: - items = self.binary_vocabulary.iteritems() - for key, value in items: - array_as_string = np.array2string(value, separator=',', max_line_width=self.size * self.size) - string += "{}{}{}\n".format(key, SEPARATOR, array_as_string[1:len(array_as_string) - 1]) - return string - - def save(self, path): - output_file_name = "{}/words.vocab".format(path) - output_file = open(output_file_name, 'w') - output_file.write(self.get_serialized_binary_representation()) - output_file.close() - - def retrieve(self, path): - input_file = open("{}/words.vocab".format(path), 'r') - buffer = "" - for line in input_file: - try: - separator_position = len(buffer) + line.index(SEPARATOR) - buffer += line - key = buffer[:separator_position] - value = buffer[separator_position + len(SEPARATOR):] - value = np.fromstring(value, sep=',') - - self.binary_vocabulary[key] = value - self.vocabulary[key] = np.where(value == 1)[0][0] - self.token_lookup[np.where(value == 1)[0][0]] = key - - buffer = 
"" - except ValueError: - buffer += line - input_file.close() - self.size = len(self.vocabulary) diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/models/__init__.py b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/models/__init__.py deleted file mode 100644 index 3e3039b7081a9e3228c8abefb6391a75b4864439..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/models/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .wav2vec_u import Wav2vec_U - - -__all__ = [ - "Wav2vec_U", -] diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/mask_tokens_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/mask_tokens_dataset.py deleted file mode 100644 index 9123235594c3977994a3ae8a03ab4c9e395cc5de..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/mask_tokens_dataset.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from functools import lru_cache - -import numpy as np -import torch -from fairseq.data import Dictionary, data_utils - -from . import BaseWrapperDataset, LRUCacheDataset - - -class MaskTokensDataset(BaseWrapperDataset): - """ - A wrapper Dataset for masked language modeling. - - Input items are masked according to the specified masking probability. - - Args: - dataset: Dataset to wrap. - sizes: Sentence lengths - vocab: Dictionary with the vocabulary and special tokens. - pad_idx: Id of pad token in vocab - mask_idx: Id of mask token in vocab - return_masked_tokens: controls whether to return the non-masked tokens - (the default) or to return a tensor with the original masked token - IDs (and *pad_idx* elsewhere). The latter is useful as targets for - masked LM training. - seed: Seed for random number generator for reproducibility. - mask_prob: probability of replacing a token with *mask_idx*. - leave_unmasked_prob: probability that a masked token is unmasked. - random_token_prob: probability of replacing a masked token with a - random token from the vocabulary. - freq_weighted_replacement: sample random replacement words based on - word frequencies in the vocab. - mask_whole_words: only mask whole words. This should be a byte mask - over vocab indices, indicating whether it is the beginning of a - word. We will extend any mask to encompass the whole word. - bpe: BPE to use for whole-word masking. - mask_multiple_length : repeat each mask index multiple times. Default - value is 1. - mask_stdev : standard deviation of masks distribution in case of - multiple masking. Default value is 0. 
- """ - - @classmethod - def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs): - """Return the source and target datasets for masked LM training.""" - dataset = LRUCacheDataset(dataset) - return ( - LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)), - LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)), - ) - - def __init__( - self, - dataset: torch.utils.data.Dataset, - vocab: Dictionary, - pad_idx: int, - mask_idx: int, - return_masked_tokens: bool = False, - seed: int = 1, - mask_prob: float = 0.15, - leave_unmasked_prob: float = 0.1, - random_token_prob: float = 0.1, - freq_weighted_replacement: bool = False, - mask_whole_words: torch.Tensor = None, - mask_multiple_length: int = 1, - mask_stdev: float = 0.0, - ): - assert 0.0 < mask_prob < 1.0 - assert 0.0 <= random_token_prob <= 1.0 - assert 0.0 <= leave_unmasked_prob <= 1.0 - assert random_token_prob + leave_unmasked_prob <= 1.0 - assert mask_multiple_length >= 1 - assert mask_stdev >= 0.0 - - self.dataset = dataset - self.vocab = vocab - self.pad_idx = pad_idx - self.mask_idx = mask_idx - self.return_masked_tokens = return_masked_tokens - self.seed = seed - self.mask_prob = mask_prob - self.leave_unmasked_prob = leave_unmasked_prob - self.random_token_prob = random_token_prob - self.mask_whole_words = mask_whole_words - self.mask_multiple_length = mask_multiple_length - self.mask_stdev = mask_stdev - - if random_token_prob > 0.0: - if freq_weighted_replacement: - weights = np.array(self.vocab.count) - else: - weights = np.ones(len(self.vocab)) - weights[: self.vocab.nspecial] = 0 - self.weights = weights / weights.sum() - - self.epoch = 0 - - @property - def can_reuse_epoch_itr_across_epochs(self): - return True # only the noise changes, not item sizes - - def set_epoch(self, epoch, **unused): - super().set_epoch(epoch) - self.epoch = epoch - - def __getitem__(self, index: int): - return self.__getitem_cached__(self.seed, self.epoch, index) - - @lru_cache(maxsize=8) - def __getitem_cached__(self, seed: int, epoch: int, index: int): - with data_utils.numpy_seed(self.seed, self.epoch, index): - item = self.dataset[index] - sz = len(item) - - assert ( - self.mask_idx not in item - ), "Dataset contains mask_idx (={}), this is not expected!".format( - self.mask_idx, - ) - - if self.mask_whole_words is not None: - word_begins_mask = self.mask_whole_words.gather(0, item) - word_begins_idx = word_begins_mask.nonzero().view(-1) - sz = len(word_begins_idx) - words = np.split(word_begins_mask, word_begins_idx)[1:] - assert len(words) == sz - word_lens = list(map(len, words)) - - # decide elements to mask - mask = np.full(sz, False) - num_mask = int( - # add a random number for probabilistic rounding - self.mask_prob * sz / float(self.mask_multiple_length) - + np.random.rand() - ) - - # multiple masking as described in the vq-wav2vec paper (https://arxiv.org/abs/1910.05453) - mask_idc = np.random.choice(sz, num_mask, replace=False) - if self.mask_stdev > 0.0: - lengths = np.random.normal( - self.mask_multiple_length, self.mask_stdev, size=num_mask - ) - lengths = [max(0, int(round(x))) for x in lengths] - mask_idc = np.asarray( - [ - mask_idc[j] + offset - for j in range(len(mask_idc)) - for offset in range(lengths[j]) - ], - dtype=np.int64, - ) - else: - mask_idc = np.concatenate( - [mask_idc + i for i in range(self.mask_multiple_length)] - ) - mask_idc = mask_idc[mask_idc < len(mask)] - try: - mask[mask_idc] = True - except: # something wrong - print( - "Assigning mask indexes 
{} to mask {} failed!".format( - mask_idc, mask - ) - ) - raise - - if self.return_masked_tokens: - # exit early if we're just returning the masked tokens - # (i.e., the targets for masked LM training) - if self.mask_whole_words is not None: - mask = np.repeat(mask, word_lens) - new_item = np.full(len(mask), self.pad_idx) - new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1] - return torch.from_numpy(new_item) - - # decide unmasking and random replacement - rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob - if rand_or_unmask_prob > 0.0: - rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob) - if self.random_token_prob == 0.0: - unmask = rand_or_unmask - rand_mask = None - elif self.leave_unmasked_prob == 0.0: - unmask = None - rand_mask = rand_or_unmask - else: - unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob - decision = np.random.rand(sz) < unmask_prob - unmask = rand_or_unmask & decision - rand_mask = rand_or_unmask & (~decision) - else: - unmask = rand_mask = None - - if unmask is not None: - mask = mask ^ unmask - - if self.mask_whole_words is not None: - mask = np.repeat(mask, word_lens) - - new_item = np.copy(item) - new_item[mask] = self.mask_idx - if rand_mask is not None: - num_rand = rand_mask.sum() - if num_rand > 0: - if self.mask_whole_words is not None: - rand_mask = np.repeat(rand_mask, word_lens) - num_rand = rand_mask.sum() - - new_item[rand_mask] = np.random.choice( - len(self.vocab), - num_rand, - p=self.weights, - ) - - return torch.from_numpy(new_item) diff --git a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/util/misc.py b/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/util/misc.py deleted file mode 100644 index d64b84ef24bea0c98e76824feb1903f6bfebe7a5..0000000000000000000000000000000000000000 --- a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/util/misc.py +++ /dev/null @@ -1,717 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -import colorsys -import datetime -import functools -import io -import json -import os -import pickle -import subprocess -import time -from collections import OrderedDict, defaultdict, deque -from typing import List, Optional - -import numpy as np -import torch -import torch.distributed as dist - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -from torch import Tensor - -__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7 -if __torchvision_need_compat_flag: - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size - - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - if d.shape[0] == 0: - return 0 - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - if os.environ.get("SHILONG_AMP", None) == "1": - eps = 1e-4 - else: - eps = 1e-6 - return self.total / (self.count + eps) - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value, - ) - - -@functools.lru_cache() -def _get_global_gloo_group(): - """ - Return a process group based on gloo backend, containing all the ranks - The result is cached. - """ - - if dist.get_backend() == "nccl": - return dist.new_group(backend="gloo") - - return dist.group.WORLD - - -def all_gather_cpu(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - - world_size = get_world_size() - if world_size == 1: - return [data] - - cpu_group = _get_global_gloo_group() - - buffer = io.BytesIO() - torch.save(data, buffer) - data_view = buffer.getbuffer() - device = "cuda" if cpu_group is None else "cpu" - tensor = torch.ByteTensor(data_view).to(device) - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long) - size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)] - if cpu_group is None: - dist.all_gather(size_list, local_size) - else: - print("gathering on cpu") - dist.all_gather(size_list, local_size, group=cpu_group) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - assert isinstance(local_size.item(), int) - local_size = int(local_size.item()) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device)) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device) - tensor = torch.cat((tensor, padding), dim=0) - if cpu_group is None: - dist.all_gather(tensor_list, tensor) - else: - dist.all_gather(tensor_list, tensor, group=cpu_group) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] - buffer = io.BytesIO(tensor.cpu().numpy()) - obj = torch.load(buffer) - data_list.append(obj) - - return data_list - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - - if os.getenv("CPU_REDUCE") == "1": - return all_gather_cpu(data) - - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - 
- # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. - """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - # print(name, str(meter)) - # import ipdb;ipdb.set_trace() - if meter.count > 0: - loss_str.append("{}: {}".format(name, str(meter))) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None, logger=None): - if logger is None: - print_func = print - else: - print_func = logger.info - - i = 0 - if not header: - header = "" - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt="{avg:.4f}") - data_time = SmoothedValue(fmt="{avg:.4f}") - space_fmt = ":" + str(len(str(len(iterable)))) + "d" - if torch.cuda.is_available(): - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - "max mem: {memory:.0f}", - ] - ) - else: - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - ] - ) - MB = 1024.0 * 1024.0 - for obj in iterable: - 
data_time.update(time.time() - end) - yield obj - # import ipdb; ipdb.set_trace() - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print_func( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB, - ) - ) - else: - print_func( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - ) - ) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print_func( - "{} Total time: {} ({:.4f} s / it)".format( - header, total_time_str, total_time / len(iterable) - ) - ) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode("ascii").strip() - - sha = "N/A" - diff = "clean" - branch = "N/A" - try: - sha = _run(["git", "rev-parse", "HEAD"]) - subprocess.check_output(["git", "diff"], cwd=cwd) - diff = _run(["git", "diff-index", "HEAD"]) - diff = "has uncommited changes" if diff else "clean" - branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"]) - except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - # import ipdb; ipdb.set_trace() - batch = list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - if mask == "auto": - self.mask = torch.zeros_like(tensors).to(tensors.device) - if self.mask.dim() == 3: - self.mask = self.mask.sum(0).to(bool) - elif self.mask.dim() == 4: - self.mask = self.mask.sum(1).to(bool) - else: - raise ValueError( - "tensors dim must be 3 or 4 but {}({})".format( - self.tensors.dim(), self.tensors.shape - ) - ) - - def imgsize(self): - res = [] - for i in range(self.tensors.shape[0]): - mask = self.mask[i] - maxH = (~mask).sum(0).max() - maxW = (~mask).sum(1).max() - res.append(torch.Tensor([maxH, maxW])) - return res - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def to_img_list_single(self, tensor, mask): - assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim()) - maxH = (~mask).sum(0).max() - maxW = (~mask).sum(1).max() - img = tensor[:, :maxH, :maxW] - return img - - def to_img_list(self): - """remove the padding and convert to img list - - Returns: - [type]: [description] - """ - if self.tensors.dim() == 3: - return self.to_img_list_single(self.tensors, self.mask) - else: - res = [] - for i in range(self.tensors.shape[0]): - tensor_i = self.tensors[i] - mask_i = self.mask[i] - res.append(self.to_img_list_single(tensor_i, mask_i)) - return res - - @property - def device(self): - return 
self.tensors.device - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - @property - def shape(self): - return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape} - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], : img.shape[2]] = False - else: - raise ValueError("not supported") - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. -@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max( - torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32) - ).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "": # 'RANK' in os.environ and - args.rank = 
int(os.environ["RANK"]) - args.world_size = int(os.environ["WORLD_SIZE"]) - args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"]) - - # launch by torch.distributed.launch - # Single node - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ... - # Multi nodes - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ... - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ... - # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK')) - # local_world_size = int(os.environ['GPU_PER_NODE_COUNT']) - # args.world_size = args.world_size * local_world_size - # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK']) - # args.rank = args.rank * local_world_size + args.local_rank - print( - "world size: {}, rank: {}, local rank: {}".format( - args.world_size, args.rank, args.local_rank - ) - ) - print(json.dumps(dict(os.environ), indent=2)) - elif "SLURM_PROCID" in os.environ: - args.rank = int(os.environ["SLURM_PROCID"]) - args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"]) - args.world_size = int(os.environ["SLURM_NPROCS"]) - - print( - "world size: {}, world rank: {}, local rank: {}, device_count: {}".format( - args.world_size, args.rank, args.local_rank, torch.cuda.device_count() - ) - ) - else: - print("Not using distributed mode") - args.distributed = False - args.world_size = 1 - args.rank = 0 - args.local_rank = 0 - return - - print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank)) - args.distributed = True - torch.cuda.set_device(args.local_rank) - args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) - - torch.distributed.init_process_group( - backend=args.dist_backend, - world_size=args.world_size, - rank=args.rank, - init_method=args.dist_url, - ) - - print("Before torch.distributed.barrier()") - torch.distributed.barrier() - print("End torch.distributed.barrier()") - setup_for_distributed(args.rank == 0) - - -@torch.no_grad() -def accuracy(output, target, topk=(1,)): - """Computes the precision@k for the specified values of k""" - if target.numel() == 0: - return [torch.zeros([], device=output.device)] - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -@torch.no_grad() -def accuracy_onehot(pred, gt): - """_summary_ - - Args: - pred (_type_): n, c - gt (_type_): n, c - """ - tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum() - acc = tp / gt.shape[0] * 100 - return acc - - -def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): - # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor - """ - Equivalent to nn.functional.interpolate, but with support for empty batch sizes. - This will eventually be supported natively by PyTorch, and this - class can go away. 
- """ - if __torchvision_need_compat_flag < 0.7: - if input.numel() > 0: - return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) - - -class color_sys: - def __init__(self, num_colors) -> None: - self.num_colors = num_colors - colors = [] - for i in np.arange(0.0, 360.0, 360.0 / num_colors): - hue = i / 360.0 - lightness = (50 + np.random.rand() * 10) / 100.0 - saturation = (90 + np.random.rand() * 10) / 100.0 - colors.append( - tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)]) - ) - self.colors = colors - - def __call__(self, idx): - return self.colors[idx] - - -def inverse_sigmoid(x, eps=1e-3): - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -def clean_state_dict(state_dict): - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k[:7] == "module.": - k = k[7:] # remove `module.` - new_state_dict[k] = v - return new_state_dict diff --git a/spaces/Ikaros521/moe-tts/monotonic_align/__init__.py b/spaces/Ikaros521/moe-tts/monotonic_align/__init__.py deleted file mode 100644 index 40b6f64aa116c74cac2f6a33444c9eeea2fdb38c..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/moe-tts/monotonic_align/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) - diff --git a/spaces/Illumotion/Koboldcpp/ggml-backend.h b/spaces/Illumotion/Koboldcpp/ggml-backend.h deleted file mode 100644 index da134b0dbed514423b1223814b2346c4048a2854..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/ggml-backend.h +++ /dev/null @@ -1,143 +0,0 @@ -#pragma once - -#include "ggml.h" - -#ifdef __cplusplus -extern "C" { -#endif - struct ggml_backend; - struct ggml_backend_buffer; - - // type-erased backend-specific types / wrappers - typedef void * ggml_backend_context_t; - typedef void * ggml_backend_graph_plan_t; - typedef void * ggml_backend_buffer_context_t; - - // avoid accessing internals of these types - typedef struct ggml_backend * ggml_backend_t; - typedef struct ggml_backend_buffer * ggml_backend_buffer_t; - - // - // backend buffer - // - - struct ggml_backend_buffer_i { - void (*free_buffer) (ggml_backend_buffer_t buffer); - void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer - size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback - void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback - void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback - }; - - // TODO: hide behind API - struct 
ggml_backend_buffer { - struct ggml_backend_buffer_i iface; - - ggml_backend_t backend; - ggml_backend_buffer_context_t context; - - size_t size; - }; - - // backend buffer functions - GGML_API ggml_backend_buffer_t ggml_backend_buffer_init( - struct ggml_backend * backend, - struct ggml_backend_buffer_i iface, - ggml_backend_buffer_context_t context, - size_t size); - - GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); - GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API void ggml_backend_buffer_free_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - - // - // backend - // - - struct ggml_backend_i { - const char * (*get_name)(ggml_backend_t backend); - - void (*free)(ggml_backend_t backend); - - // buffer allocation - ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size); - - // get buffer alignment - size_t (*get_alignment)(ggml_backend_t backend); - - // tensor data access - // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize - void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - void (*synchronize) (ggml_backend_t backend); - - // (optional) copy tensor between different backends, allow for single-copy tranfers - void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); - - // compute graph with a plan - ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph); - void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); - - // compute graph without a plan - void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph); - - // check if the backend supports an operation - bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op); - }; - - // TODO: hide behind API - struct ggml_backend { - struct ggml_backend_i iface; - - ggml_backend_context_t context; - }; - - // backend helper functions - GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor); - - GGML_API const char * ggml_backend_name(ggml_backend_t backend); - GGML_API void ggml_backend_free(ggml_backend_t backend); - - GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size); - - GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend); - - GGML_API void ggml_backend_tensor_set_async( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - GGML_API void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - - GGML_API void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t 
size); - GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - - GGML_API void ggml_backend_synchronize(ggml_backend_t backend); - - GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph); - - GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan); - GGML_API void ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph); - GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op); - - // tensor copy between different backends - GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst); - - // - // CPU backend - // - - GGML_API ggml_backend_t ggml_backend_cpu_init(void); - - GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend); - - GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads); - - GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size); - -#ifdef __cplusplus -} -#endif diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/modeling/transformer.py b/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/modeling/transformer.py deleted file mode 100644 index 28fafea52288603fea275f3a100790471825c34a..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/modeling/transformer.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import Tensor, nn - -import math -from typing import Tuple, Type - -from .common import MLPBlock - - -class TwoWayTransformer(nn.Module): - def __init__( - self, - depth: int, - embedding_dim: int, - num_heads: int, - mlp_dim: int, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - ) -> None: - """ - A transformer decoder that attends to an input image using - queries whose positional embedding is supplied. - - Args: - depth (int): number of layers in the transformer - embedding_dim (int): the channel dimension for the input embeddings - num_heads (int): the number of heads for multihead attention. 
Must - divide embedding_dim - mlp_dim (int): the channel dimension internal to the MLP block - activation (nn.Module): the activation to use in the MLP block - """ - super().__init__() - self.depth = depth - self.embedding_dim = embedding_dim - self.num_heads = num_heads - self.mlp_dim = mlp_dim - self.layers = nn.ModuleList() - - for i in range(depth): - self.layers.append( - TwoWayAttentionBlock( - embedding_dim=embedding_dim, - num_heads=num_heads, - mlp_dim=mlp_dim, - activation=activation, - attention_downsample_rate=attention_downsample_rate, - skip_first_layer_pe=(i == 0), - ) - ) - - self.final_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm_final_attn = nn.LayerNorm(embedding_dim) - - def forward( - self, - image_embedding: Tensor, - image_pe: Tensor, - point_embedding: Tensor, - ) -> Tuple[Tensor, Tensor]: - """ - Args: - image_embedding (torch.Tensor): image to attend to. Should be shape - B x embedding_dim x h x w for any h and w. - image_pe (torch.Tensor): the positional encoding to add to the image. Must - have the same shape as image_embedding. - point_embedding (torch.Tensor): the embedding to add to the query points. - Must have shape B x N_points x embedding_dim for any N_points. - - Returns: - torch.Tensor: the processed point_embedding - torch.Tensor: the processed image_embedding - """ - # BxCxHxW -> BxHWxC == B x N_image_tokens x C - bs, c, h, w = image_embedding.shape - image_embedding = image_embedding.flatten(2).permute(0, 2, 1) - image_pe = image_pe.flatten(2).permute(0, 2, 1) - - # Prepare queries - queries = point_embedding - keys = image_embedding - - # Apply transformer blocks and final layernorm - for layer in self.layers: - queries, keys = layer( - queries=queries, - keys=keys, - query_pe=point_embedding, - key_pe=image_pe, - ) - - # Apply the final attention layer from the points to the image - q = queries + point_embedding - k = keys + image_pe - attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm_final_attn(queries) - - return queries, keys - - -class TwoWayAttentionBlock(nn.Module): - def __init__( - self, - embedding_dim: int, - num_heads: int, - mlp_dim: int = 2048, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - skip_first_layer_pe: bool = False, - ) -> None: - """ - A transformer block with four layers: (1) self-attention of sparse - inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp - block on sparse inputs, and (4) cross attention of dense inputs to sparse - inputs. 
- - Arguments: - embedding_dim (int): the channel dimension of the embeddings - num_heads (int): the number of heads in the attention layers - mlp_dim (int): the hidden dimension of the mlp block - activation (nn.Module): the activation of the mlp block - skip_first_layer_pe (bool): skip the PE on the first layer - """ - super().__init__() - self.self_attn = Attention(embedding_dim, num_heads) - self.norm1 = nn.LayerNorm(embedding_dim) - - self.cross_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm2 = nn.LayerNorm(embedding_dim) - - self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) - self.norm3 = nn.LayerNorm(embedding_dim) - - self.norm4 = nn.LayerNorm(embedding_dim) - self.cross_attn_image_to_token = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - - self.skip_first_layer_pe = skip_first_layer_pe - - def forward( - self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor - ) -> Tuple[Tensor, Tensor]: - # Self attention block - if self.skip_first_layer_pe: - queries = self.self_attn(q=queries, k=queries, v=queries) - else: - q = queries + query_pe - attn_out = self.self_attn(q=q, k=q, v=queries) - queries = queries + attn_out - queries = self.norm1(queries) - - # Cross attention block, tokens attending to image embedding - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm2(queries) - - # MLP block - mlp_out = self.mlp(queries) - queries = queries + mlp_out - queries = self.norm3(queries) - - # Cross attention block, image embedding attending to tokens - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) - keys = keys + attn_out - keys = self.norm4(keys) - - return queries, keys - - -class Attention(nn.Module): - """ - An attention layer that allows for downscaling the size of the embedding - after projection to queries, keys, and values. - """ - - def __init__( - self, - embedding_dim: int, - num_heads: int, - downsample_rate: int = 1, - ) -> None: - super().__init__() - self.embedding_dim = embedding_dim - self.internal_dim = embedding_dim // downsample_rate - self.num_heads = num_heads - assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
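This Attention variant projects queries, keys, and values down to internal_dim = embedding_dim // downsample_rate before splitting heads, then projects back up on the way out. A minimal usage sketch, not part of the deleted file; batch size, token counts, and dimensions are illustrative assumptions:

import torch

# 256-d embeddings, 8 heads, downsample_rate=2 -> internal_dim of 128 (16 per head).
attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
q = torch.randn(1, 5, 256)      # B x N_point_tokens x C
k = torch.randn(1, 4096, 256)   # B x N_image_tokens x C (e.g. a 64x64 feature map)
v = torch.randn(1, 4096, 256)
out = attn(q=q, k=k, v=v)       # -> shape (1, 5, 256), projected back to embedding_dim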
- - self.q_proj = nn.Linear(embedding_dim, self.internal_dim) - self.k_proj = nn.Linear(embedding_dim, self.internal_dim) - self.v_proj = nn.Linear(embedding_dim, self.internal_dim) - self.out_proj = nn.Linear(self.internal_dim, embedding_dim) - - def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: - b, n, c = x.shape - x = x.reshape(b, n, num_heads, c // num_heads) - return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head - - def _recombine_heads(self, x: Tensor) -> Tensor: - b, n_heads, n_tokens, c_per_head = x.shape - x = x.transpose(1, 2) - return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C - - def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: - # Input projections - q = self.q_proj(q) - k = self.k_proj(k) - v = self.v_proj(v) - - # Separate into heads - q = self._separate_heads(q, self.num_heads) - k = self._separate_heads(k, self.num_heads) - v = self._separate_heads(v, self.num_heads) - - # Attention - _, _, _, c_per_head = q.shape - attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens - attn = attn / math.sqrt(c_per_head) - attn = torch.softmax(attn, dim=-1) - - # Get output - out = attn @ v - out = self._recombine_heads(out) - out = self.out_proj(out) - - return out diff --git a/spaces/Jamkonams/AutoGPT/autogpt/commands/audio_text.py b/spaces/Jamkonams/AutoGPT/autogpt/commands/audio_text.py deleted file mode 100644 index cae32d4eb78c4268bf6ef1bae3c15a399af046bf..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/commands/audio_text.py +++ /dev/null @@ -1,36 +0,0 @@ -import json - -import requests - -from autogpt.config import Config -from autogpt.workspace import path_in_workspace - -cfg = Config() - - -def read_audio_from_file(audio_path): - audio_path = path_in_workspace(audio_path) - with open(audio_path, "rb") as audio_file: - audio = audio_file.read() - return read_audio(audio) - - -def read_audio(audio): - model = cfg.huggingface_audio_to_text_model - api_url = f"https://api-inference.huggingface.co/models/{model}" - api_token = cfg.huggingface_api_token - headers = {"Authorization": f"Bearer {api_token}"} - - if api_token is None: - raise ValueError( - "You need to set your Hugging Face API token in the config file." 
- ) - - response = requests.post( - api_url, - headers=headers, - data=audio, - ) - - text = json.loads(response.content.decode("utf-8"))["text"] - return "The audio says: " + text diff --git a/spaces/Jellyfish042/punctuation_mark_prediction/README.md b/spaces/Jellyfish042/punctuation_mark_prediction/README.md deleted file mode 100644 index 7fdb8a8aa5699cd504b71820ee6915ca61931ec1..0000000000000000000000000000000000000000 --- a/spaces/Jellyfish042/punctuation_mark_prediction/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Punctuation Mark Prediction -emoji: 📚 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Justin-Choo/Anzu-mix_WEB_UI/app.py b/spaces/Justin-Choo/Anzu-mix_WEB_UI/app.py deleted file mode 100644 index 3fd8c92aeadd5df73990f67bba938a969e181cd3..0000000000000000000000000000000000000000 --- a/spaces/Justin-Choo/Anzu-mix_WEB_UI/app.py +++ /dev/null @@ -1,149 +0,0 @@ -import os -from sys import executable as pyexecutable -import subprocess -import pathlib -import gc - -def Gitclone(URI:str,ClonePath:str = "") -> int : - if(ClonePath == "") : - while True: - i=subprocess.run([r"git",r"clone",URI]) - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i - else: - while True: - i=subprocess.run([r"git",r"clone",URI,ClonePath]) - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i -def DownLoad(URI:str,DownloadPath:str,DownLoadFileName:str ) -> int: - while (True): - i=subprocess.run([r"aria2c",r"-c",r"-x" ,r"16", r"-s",r"16", r"-k" ,r"1M" ,r"-m",r"0",r"--enable-mmap=false",r"--console-log-level=error",r"-d",DownloadPath,r"-o",DownLoadFileName,URI]); - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i -user_home =pathlib.Path.home().resolve() -os.chdir(str(user_home)) -#clone stable-diffusion-webui repo -print("cloning stable-diffusion-webui repo") -Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git",str(user_home / r"stable-diffusion-webui")) -os.chdir(str(user_home / r"stable-diffusion-webui")) -os.system("git reset --hard 89f9faa63388756314e8a1d96cf86bf5e0663045") -# - -#install extensions -print("installing extensions") -Gitclone(r"https://huggingface.co/embed/negative",str(user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative")) -Gitclone(r"https://huggingface.co/embed/lora",str(user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive")) -DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth",str(user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN") ,r"4x-UltraSharp.pth") -while True: - if(subprocess.run([r"wget",r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py",r"-O",str(user_home / r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")]).returncode == 0): - break -Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui" )) -#Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser",str(user_home / r"stable-diffusion-webui" / r"extensions"/ r"stable-diffusion-webui-images-browser")) -Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface",str(user_home / r"stable-diffusion-webui" / r"extensions" / 
r"stable-diffusion-webui-huggingface")) -Gitclone(r"https://github.com/camenduru/sd-civitai-browser",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-civitai-browser")) -Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks")) -Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet")) -Gitclone(r"https://github.com/fkunn1326/openpose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor")) -Gitclone(r"https://github.com/jexom/sd-webui-depth-lib",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib")) -Gitclone(r"https://github.com/hnmr293/posex",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"posex")) -Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor")) -#中文本地化的请解除下一行的注释 -#Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN")) -Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" , str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete")) -Gitclone(r"https://github.com/camenduru/sd-webui-tunnels",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels")) -Gitclone(r"https://github.com/etherealxx/batchlinks-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui")) -Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin")) - -#Gitclone(r"https://github.com/KohakuBueleaf/a1111-sd-webui-locon",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-locon" )) -Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg")) -Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot")) -Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo")) - -os.chdir(user_home / r"stable-diffusion-webui") - -#download ControlNet models -print("extensions dolwnload done .\ndownloading ControlNet models") -dList =[r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors", - 
r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"] -for i in range(0,len(dList)): DownLoad(dList[i],str(user_home / "stable-diffusion-webui" / "extensions" / "sd-webui-controlnet" / "models"),pathlib.Path(dList[i]).name) -del dList - -#download model -#you can change model download address here -print("ControlNet models download done.\ndownloading model") -DownLoad(r"https://huggingface.co/natsusakiyomi/AnzuMix/resolve/main/AnzuMix-v1.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AnzuMix-v1.safetensors") - 
-#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.5-pruned.ckpt") -#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.0.vae.pt") -#DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"Counterfeit-V3.0_fp16.safetensors") -#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1B_orangemixs.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AOM3A1B_orangemixs.safetensors") -#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"orangemix.vae.pt") -#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Baked%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_BakedVAE.safetensors") -#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_WithoutVAE.safetensors") -#DownLoad(r"https://civitai.com/api/download/models/9474",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"chilloutmix_NiPrunedFp16.safetensors") - -DownLoad(r"https://civitai.com/api/download/models/39885",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"Better_light.safetensors") -DownLoad(r"https://civitai.com/api/download/models/21065",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"LAS.safetensors") -DownLoad(r"https://civitai.com/api/download/models/39164",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"backlighting.safetensors") -#strt webui - -print("Done\nStarting Webui...") -os.chdir(user_home / r"stable-diffusion-webui") -while True: - ret=subprocess.run([r"python3" ,r"launch.py",r"--precision",r"full",r"--no-half",r"--no-half-vae",r"--enable-insecure-extension-access",r"--medvram",r"--skip-torch-cuda-test",r"--enable-console-prompts",r"--ui-settings-file="+str(pathlib.Path(__file__).parent /r"config.json")]) - if(ret.returncode == 0 ): - del ret - gc.collect() - else : - del ret - -del os ,user_home ,pyexecutable ,subprocess \ No newline at end of file diff --git a/spaces/KenjieDec/RemBG/rembg/bg.py b/spaces/KenjieDec/RemBG/rembg/bg.py deleted file mode 100644 index b5ce82a3042e126eb3326875742092d36fa5ed59..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/RemBG/rembg/bg.py +++ /dev/null @@ -1,201 +0,0 @@ -import io -from enum import Enum -from typing import Any, List, Optional, Tuple, Union - -import numpy as np -from cv2 import ( - BORDER_DEFAULT, - MORPH_ELLIPSE, - MORPH_OPEN, - GaussianBlur, - getStructuringElement, - morphologyEx, -) -from PIL import Image, ImageOps -from PIL.Image import Image as PILImage -from pymatting.alpha.estimate_alpha_cf import estimate_alpha_cf -from pymatting.foreground.estimate_foreground_ml import 
estimate_foreground_ml -from pymatting.util.util import stack_images -from scipy.ndimage import binary_erosion - -from .session_factory import new_session -from .sessions import sessions_class -from .sessions.base import BaseSession - -kernel = getStructuringElement(MORPH_ELLIPSE, (3, 3)) - - -class ReturnType(Enum): - BYTES = 0 - PILLOW = 1 - NDARRAY = 2 - - -def alpha_matting_cutout( - img: PILImage, - mask: PILImage, - foreground_threshold: int, - background_threshold: int, - erode_structure_size: int, -) -> PILImage: - if img.mode == "RGBA" or img.mode == "CMYK": - img = img.convert("RGB") - - img = np.asarray(img) - mask = np.asarray(mask) - - is_foreground = mask > foreground_threshold - is_background = mask < background_threshold - - structure = None - if erode_structure_size > 0: - structure = np.ones( - (erode_structure_size, erode_structure_size), dtype=np.uint8 - ) - - is_foreground = binary_erosion(is_foreground, structure=structure) - is_background = binary_erosion(is_background, structure=structure, border_value=1) - - trimap = np.full(mask.shape, dtype=np.uint8, fill_value=128) - trimap[is_foreground] = 255 - trimap[is_background] = 0 - - img_normalized = img / 255.0 - trimap_normalized = trimap / 255.0 - - alpha = estimate_alpha_cf(img_normalized, trimap_normalized) - foreground = estimate_foreground_ml(img_normalized, alpha) - cutout = stack_images(foreground, alpha) - - cutout = np.clip(cutout * 255, 0, 255).astype(np.uint8) - cutout = Image.fromarray(cutout) - - return cutout - - -def naive_cutout(img: PILImage, mask: PILImage) -> PILImage: - empty = Image.new("RGBA", (img.size), 0) - cutout = Image.composite(img, empty, mask) - return cutout - - -def get_concat_v_multi(imgs: List[PILImage]) -> PILImage: - pivot = imgs.pop(0) - for im in imgs: - pivot = get_concat_v(pivot, im) - return pivot - - -def get_concat_v(img1: PILImage, img2: PILImage) -> PILImage: - dst = Image.new("RGBA", (img1.width, img1.height + img2.height)) - dst.paste(img1, (0, 0)) - dst.paste(img2, (0, img1.height)) - return dst - - -def post_process(mask: np.ndarray) -> np.ndarray: - """ - Post Process the mask for a smooth boundary by applying Morphological Operations - Research based on paper: https://www.sciencedirect.com/science/article/pii/S2352914821000757 - args: - mask: Binary Numpy Mask - """ - mask = morphologyEx(mask, MORPH_OPEN, kernel) - mask = GaussianBlur(mask, (5, 5), sigmaX=2, sigmaY=2, borderType=BORDER_DEFAULT) - mask = np.where(mask < 127, 0, 255).astype(np.uint8) # convert again to binary - return mask - - -def apply_background_color(img: PILImage, color: Tuple[int, int, int, int]) -> PILImage: - r, g, b, a = color - colored_image = Image.new("RGBA", img.size, (r, g, b, a)) - colored_image.paste(img, mask=img) - - return colored_image - - -def fix_image_orientation(img: PILImage) -> PILImage: - return ImageOps.exif_transpose(img) - - -def download_models() -> None: - for session in sessions_class: - session.download_models() - - -def remove( - data: Union[bytes, PILImage, np.ndarray], - alpha_matting: bool = False, - alpha_matting_foreground_threshold: int = 240, - alpha_matting_background_threshold: int = 10, - alpha_matting_erode_size: int = 10, - session: Optional[BaseSession] = None, - only_mask: bool = False, - post_process_mask: bool = False, - bgcolor: Optional[Tuple[int, int, int, int]] = None, - *args: Optional[Any], - **kwargs: Optional[Any] -) -> Union[bytes, PILImage, np.ndarray]: - if isinstance(data, PILImage): - return_type = ReturnType.PILLOW - img = data - elif 
isinstance(data, bytes): - return_type = ReturnType.BYTES - img = Image.open(io.BytesIO(data)) - elif isinstance(data, np.ndarray): - return_type = ReturnType.NDARRAY - img = Image.fromarray(data) - else: - raise ValueError("Input type {} is not supported.".format(type(data))) - - # Fix image orientation - img = fix_image_orientation(img) - - if session is None: - session = new_session("u2net", *args, **kwargs) - - masks = session.predict(img, *args, **kwargs) - cutouts = [] - - for mask in masks: - if post_process_mask: - mask = Image.fromarray(post_process(np.array(mask))) - - if only_mask: - cutout = mask - - elif alpha_matting: - try: - cutout = alpha_matting_cutout( - img, - mask, - alpha_matting_foreground_threshold, - alpha_matting_background_threshold, - alpha_matting_erode_size, - ) - except ValueError: - cutout = naive_cutout(img, mask) - - else: - cutout = naive_cutout(img, mask) - - cutouts.append(cutout) - - cutout = img - if len(cutouts) > 0: - cutout = get_concat_v_multi(cutouts) - - if bgcolor is not None and not only_mask: - cutout = apply_background_color(cutout, bgcolor) - - if ReturnType.PILLOW == return_type: - return cutout - - if ReturnType.NDARRAY == return_type: - return np.asarray(cutout) - - bio = io.BytesIO() - cutout.save(bio, "PNG") - bio.seek(0) - - return bio.read() diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/preprocess.py b/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/preprocess.py deleted file mode 100644 index cde325c4163d6800404de214202d773addfff296..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/preprocess.py +++ /dev/null @@ -1,259 +0,0 @@ -from multiprocessing.pool import Pool -from synthesizer import audio -from functools import partial -from itertools import chain -from encoder import inference as encoder -from pathlib import Path -from utils import logmmse -from tqdm import tqdm -import numpy as np -import librosa - - -def preprocess_dataset(datasets_root: Path, out_dir: Path, n_processes: int, - skip_existing: bool, hparams, no_alignments: bool, - datasets_name: str, subfolders: str): - # Gather the input directories - dataset_root = datasets_root.joinpath(datasets_name) - input_dirs = [dataset_root.joinpath(subfolder.strip()) for subfolder in subfolders.split(",")] - print("\n ".join(map(str, ["Using data from:"] + input_dirs))) - assert all(input_dir.exists() for input_dir in input_dirs) - - # Create the output directories for each output file type - out_dir.joinpath("mels").mkdir(exist_ok=True) - out_dir.joinpath("audio").mkdir(exist_ok=True) - - # Create a metadata file - metadata_fpath = out_dir.joinpath("train.txt") - metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8") - - # Preprocess the dataset - speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs)) - func = partial(preprocess_speaker, out_dir=out_dir, skip_existing=skip_existing, - hparams=hparams, no_alignments=no_alignments) - job = Pool(n_processes).imap(func, speaker_dirs) - for speaker_metadata in tqdm(job, datasets_name, len(speaker_dirs), unit="speakers"): - for metadatum in speaker_metadata: - metadata_file.write("|".join(str(x) for x in metadatum) + "\n") - metadata_file.close() - - # Verify the contents of the metadata file - with metadata_fpath.open("r", encoding="utf-8") as metadata_file: - metadata = [line.split("|") for line in metadata_file] - mel_frames = sum([int(m[4]) for m in metadata]) - timesteps = sum([int(m[3]) for m 
in metadata]) - sample_rate = hparams.sample_rate - hours = (timesteps / sample_rate) / 3600 - print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." % - (len(metadata), mel_frames, timesteps, hours)) - print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata)) - print("Max mel frames length: %d" % max(int(m[4]) for m in metadata)) - print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata)) - - -def preprocess_speaker(speaker_dir, out_dir: Path, skip_existing: bool, hparams, no_alignments: bool): - metadata = [] - for book_dir in speaker_dir.glob("*"): - if no_alignments: - # Gather the utterance audios and texts - # LibriTTS uses .wav but we will include extensions for compatibility with other datasets - extensions = ["*.wav", "*.flac", "*.mp3"] - for extension in extensions: - wav_fpaths = book_dir.glob(extension) - - for wav_fpath in wav_fpaths: - # Load the audio waveform - wav, _ = librosa.load(str(wav_fpath), hparams.sample_rate) - if hparams.rescale: - wav = wav / np.abs(wav).max() * hparams.rescaling_max - - # Get the corresponding text - # Check for .txt (for compatibility with other datasets) - text_fpath = wav_fpath.with_suffix(".txt") - if not text_fpath.exists(): - # Check for .normalized.txt (LibriTTS) - text_fpath = wav_fpath.with_suffix(".normalized.txt") - assert text_fpath.exists() - with text_fpath.open("r") as text_file: - text = "".join([line for line in text_file]) - text = text.replace("\"", "") - text = text.strip() - - # Process the utterance - metadata.append(process_utterance(wav, text, out_dir, str(wav_fpath.with_suffix("").name), - skip_existing, hparams)) - else: - # Process alignment file (LibriSpeech support) - # Gather the utterance audios and texts - try: - alignments_fpath = next(book_dir.glob("*.alignment.txt")) - with alignments_fpath.open("r") as alignments_file: - alignments = [line.rstrip().split(" ") for line in alignments_file] - except StopIteration: - # A few alignment files will be missing - continue - - # Iterate over each entry in the alignments file - for wav_fname, words, end_times in alignments: - wav_fpath = book_dir.joinpath(wav_fname + ".flac") - assert wav_fpath.exists() - words = words.replace("\"", "").split(",") - end_times = list(map(float, end_times.replace("\"", "").split(","))) - - # Process each sub-utterance - wavs, texts = split_on_silences(wav_fpath, words, end_times, hparams) - for i, (wav, text) in enumerate(zip(wavs, texts)): - sub_basename = "%s_%02d" % (wav_fname, i) - metadata.append(process_utterance(wav, text, out_dir, sub_basename, - skip_existing, hparams)) - - return [m for m in metadata if m is not None] - - -def split_on_silences(wav_fpath, words, end_times, hparams): - # Load the audio waveform - wav, _ = librosa.load(str(wav_fpath), hparams.sample_rate) - if hparams.rescale: - wav = wav / np.abs(wav).max() * hparams.rescaling_max - - words = np.array(words) - start_times = np.array([0.0] + end_times[:-1]) - end_times = np.array(end_times) - assert len(words) == len(end_times) == len(start_times) - assert words[0] == "" and words[-1] == "" - - # Find pauses that are too long - mask = (words == "") & (end_times - start_times >= hparams.silence_min_duration_split) - mask[0] = mask[-1] = True - breaks = np.where(mask)[0] - - # Profile the noise from the silences and perform noise reduction on the waveform - silence_times = [[start_times[i], end_times[i]] for i in breaks] - silence_times = (np.array(silence_times) * 
hparams.sample_rate).astype(np.int) - noisy_wav = np.concatenate([wav[stime[0]:stime[1]] for stime in silence_times]) - if len(noisy_wav) > hparams.sample_rate * 0.02: - profile = logmmse.profile_noise(noisy_wav, hparams.sample_rate) - wav = logmmse.denoise(wav, profile, eta=0) - - # Re-attach segments that are too short - segments = list(zip(breaks[:-1], breaks[1:])) - segment_durations = [start_times[end] - end_times[start] for start, end in segments] - i = 0 - while i < len(segments) and len(segments) > 1: - if segment_durations[i] < hparams.utterance_min_duration: - # See if the segment can be re-attached with the right or the left segment - left_duration = float("inf") if i == 0 else segment_durations[i - 1] - right_duration = float("inf") if i == len(segments) - 1 else segment_durations[i + 1] - joined_duration = segment_durations[i] + min(left_duration, right_duration) - - # Do not re-attach if it causes the joined utterance to be too long - if joined_duration > hparams.hop_size * hparams.max_mel_frames / hparams.sample_rate: - i += 1 - continue - - # Re-attach the segment with the neighbour of shortest duration - j = i - 1 if left_duration <= right_duration else i - segments[j] = (segments[j][0], segments[j + 1][1]) - segment_durations[j] = joined_duration - del segments[j + 1], segment_durations[j + 1] - else: - i += 1 - - # Split the utterance - segment_times = [[end_times[start], start_times[end]] for start, end in segments] - segment_times = (np.array(segment_times) * hparams.sample_rate).astype(np.int) - wavs = [wav[segment_time[0]:segment_time[1]] for segment_time in segment_times] - texts = [" ".join(words[start + 1:end]).replace(" ", " ") for start, end in segments] - - # # DEBUG: play the audio segments (run with -n=1) - # import sounddevice as sd - # if len(wavs) > 1: - # print("This sentence was split in %d segments:" % len(wavs)) - # else: - # print("There are no silences long enough for this sentence to be split:") - # for wav, text in zip(wavs, texts): - # # Pad the waveform with 1 second of silence because sounddevice tends to cut them early - # # when playing them. You shouldn't need to do that in your parsers. - # wav = np.concatenate((wav, [0] * 16000)) - # print("\t%s" % text) - # sd.play(wav, 16000, blocking=True) - # print("") - - return wavs, texts - - -def process_utterance(wav: np.ndarray, text: str, out_dir: Path, basename: str, - skip_existing: bool, hparams): - ## FOR REFERENCE: - # For you not to lose your head if you ever wish to change things here or implement your own - # synthesizer. - # - Both the audios and the mel spectrograms are saved as numpy arrays - # - There is no processing done to the audios that will be saved to disk beyond volume - # normalization (in split_on_silences) - # - However, pre-emphasis is applied to the audios before computing the mel spectrogram. This - # is why we re-apply it on the audio on the side of the vocoder. - # - Librosa pads the waveform before computing the mel spectrogram. Here, the waveform is saved - # without extra padding. This means that you won't have an exact relation between the length - # of the wav and of the mel spectrogram. See the vocoder data loader. 
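The reference note above means the saved waveform length and the saved mel length are only loosely coupled: librosa pads the waveform before the STFT, so the frame count is not exactly len(wav) / hop_size. A standalone back-of-the-envelope check, assuming the usual hop-based framing and using the names from the code around it:

# Rough relation only; hop_size comes from hparams and padding adds a frame or two.
approx_frames = len(wav) // hparams.hop_size
mel = audio.melspectrogram(wav, hparams)
print(approx_frames, mel.shape[1])  # close, but generally not equal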
- - - # Skip existing utterances if needed - mel_fpath = out_dir.joinpath("mels", "mel-%s.npy" % basename) - wav_fpath = out_dir.joinpath("audio", "audio-%s.npy" % basename) - if skip_existing and mel_fpath.exists() and wav_fpath.exists(): - return None - - # Trim silence - if hparams.trim_silence: - wav = encoder.preprocess_wav(wav, normalize=False, trim_silence=True) - - # Skip utterances that are too short - if len(wav) < hparams.utterance_min_duration * hparams.sample_rate: - return None - - # Compute the mel spectrogram - mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) - mel_frames = mel_spectrogram.shape[1] - - # Skip utterances that are too long - if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length: - return None - - # Write the spectrogram, embed and audio to disk - np.save(mel_fpath, mel_spectrogram.T, allow_pickle=False) - np.save(wav_fpath, wav, allow_pickle=False) - - # Return a tuple describing this training example - return wav_fpath.name, mel_fpath.name, "embed-%s.npy" % basename, len(wav), mel_frames, text - - -def embed_utterance(fpaths, encoder_model_fpath): - if not encoder.is_loaded(): - encoder.load_model(encoder_model_fpath) - - # Compute the speaker embedding of the utterance - wav_fpath, embed_fpath = fpaths - wav = np.load(wav_fpath) - wav = encoder.preprocess_wav(wav) - embed = encoder.embed_utterance(wav) - np.save(embed_fpath, embed, allow_pickle=False) - - -def create_embeddings(synthesizer_root: Path, encoder_model_fpath: Path, n_processes: int): - wav_dir = synthesizer_root.joinpath("audio") - metadata_fpath = synthesizer_root.joinpath("train.txt") - assert wav_dir.exists() and metadata_fpath.exists() - embed_dir = synthesizer_root.joinpath("embeds") - embed_dir.mkdir(exist_ok=True) - - # Gather the input wave filepath and the target output embed filepath - with metadata_fpath.open("r") as metadata_file: - metadata = [line.split("|") for line in metadata_file] - fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata] - - # TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here. - # Embed the utterances in separate threads - func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath) - job = Pool(n_processes).imap(func, fpaths) - list(tqdm(job, "Embedding", len(fpaths), unit="utterances")) - diff --git a/spaces/Kimata/multimodal_deepfake_detection/data/generate_dataset_to_tfrecord.py b/spaces/Kimata/multimodal_deepfake_detection/data/generate_dataset_to_tfrecord.py deleted file mode 100644 index dfe07905cebfa505ac8e0a39bce810fd3d222ed8..0000000000000000000000000000000000000000 --- a/spaces/Kimata/multimodal_deepfake_detection/data/generate_dataset_to_tfrecord.py +++ /dev/null @@ -1,178 +0,0 @@ -#Code outsourced from https://github.com/deepmind/dmvr/tree/master and later modified. 
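The script below writes one tf.train.SequenceExample per video, with context features such as image/encoded, WAVEFORM/feature/floats, clip/label/index and clip/label/text. A minimal sketch of reading a written shard back, assuming parsing dtypes inferred from the setters below:

import tensorflow as tf

def parse_record(raw):
    # Context features written by serialize_example below; dtypes are assumptions.
    context_features = {
        "image/encoded": tf.io.FixedLenFeature([], tf.string),
        "WAVEFORM/feature/floats": tf.io.FixedLenFeature([], tf.string),
        "clip/label/index": tf.io.FixedLenFeature([], tf.int64),
        "clip/label/text": tf.io.FixedLenFeature([], tf.string),
    }
    context, _ = tf.io.parse_single_sequence_example(raw, context_features=context_features)
    return context

# Shard naming follows the writer below: <basename>-00000-of-0000N in the output path.
ds = tf.data.TFRecordDataset("fakeavceleb_tfrec/fakeavceleb_1k-00000-of-00004")
for context in ds.map(parse_record).take(1):
    print(context["clip/label/text"].numpy(), context["clip/label/index"].numpy())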
- -"""Python script to generate TFRecords of SequenceExample from raw videos.""" - -import contextlib -import math -import os -import cv2 -from typing import Dict, Optional, Sequence -import moviepy.editor -from absl import app -from absl import flags -import ffmpeg -import numpy as np -import pandas as pd -import tensorflow as tf - -import warnings -warnings.filterwarnings('ignore') - -flags.DEFINE_string("csv_path", "fakeavceleb_1k.csv", "Input csv") -flags.DEFINE_string("output_path", "fakeavceleb_tfrec", "Tfrecords output path.") -flags.DEFINE_string("video_root_path", "./", - "Root directory containing the raw videos.") -flags.DEFINE_integer( - "num_shards", 4, "Number of shards to output, -1 means" - "it will automatically adapt to the sqrt(num_examples).") -flags.DEFINE_bool("decode_audio", False, "Whether or not to decode the audio") -flags.DEFINE_bool("shuffle_csv", False, "Whether or not to shuffle the csv.") -FLAGS = flags.FLAGS - - -_JPEG_HEADER = b"\xff\xd8" - - -@contextlib.contextmanager -def _close_on_exit(writers): - """Call close on all writers on exit.""" - try: - yield writers - finally: - for writer in writers: - writer.close() - - -def add_float_list(key: str, values: Sequence[float], - sequence: tf.train.SequenceExample): - sequence.feature_lists.feature_list[key].feature.add( - ).float_list.value[:] = values - - -def add_bytes_list(key: str, values: Sequence[bytes], - sequence: tf.train.SequenceExample): - sequence.feature_lists.feature_list[key].feature.add().bytes_list.value[:] = values - - -def add_int_list(key: str, values: Sequence[int], - sequence: tf.train.SequenceExample): - sequence.feature_lists.feature_list[key].feature.add().int64_list.value[:] = values - - -def set_context_int_list(key: str, value: Sequence[int], - sequence: tf.train.SequenceExample): - sequence.context.feature[key].int64_list.value[:] = value - - -def set_context_bytes(key: str, value: bytes, - sequence: tf.train.SequenceExample): - sequence.context.feature[key].bytes_list.value[:] = (value,) - -def set_context_bytes_list(key: str, value: Sequence[bytes], - sequence: tf.train.SequenceExample): - sequence.context.feature[key].bytes_list.value[:] = value - - -def set_context_float(key: str, value: float, - sequence: tf.train.SequenceExample): - sequence.context.feature[key].float_list.value[:] = (value,) - - -def set_context_int(key: str, value: int, sequence: tf.train.SequenceExample): - sequence.context.feature[key].int64_list.value[:] = (value,) - - -def extract_frames(video_path, fps = 10, min_resize = 256): - '''Load n number of frames from a video''' - v_cap = cv2.VideoCapture(video_path) - v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT)) - - if fps is None: - sample = np.arange(0, v_len) - else: - sample = np.linspace(0, v_len - 1, fps).astype(int) - - frames = [] - for j in range(v_len): - success = v_cap.grab() - if j in sample: - success, frame = v_cap.retrieve() - if not success: - continue - - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = cv2.resize(frame, (min_resize, min_resize)) - frames.append(frame) - - v_cap.release() - frame_np = np.stack(frames) - return frame_np.tobytes() - -def extract_audio(video_path: str, - sampling_rate: int = 16_000): - """Extract raw mono audio float list from video_path with ffmpeg.""" - video = moviepy.editor.VideoFileClip(video_path) - audio = video.audio.to_soundarray() - #Load first channel. 
- audio = audio[:, 0] - - return np.array(audio) - -#Each of the features can be coerced into a tf.train.Example-compatible type using one of the _bytes_feature, _float_feature and the _int64_feature. -#You can then create a tf.train.Example message from these encoded features. - -def serialize_example(video_path: str, label_name: str, label_map: Optional[Dict[str, int]] = None): - # Initiate the sequence example. - seq_example = tf.train.SequenceExample() - - imgs_encoded = extract_frames(video_path, fps = 10) - - audio = extract_audio(video_path) - - set_context_bytes(f'image/encoded', imgs_encoded, seq_example) - set_context_bytes("video_path", video_path.encode(), seq_example) - set_context_bytes("WAVEFORM/feature/floats", audio.tobytes(), seq_example) - set_context_int("clip/label/index", label_map[label_name], seq_example) - set_context_bytes("clip/label/text", label_name.encode(), seq_example) - return seq_example - - -def main(argv): - del argv - # reads the input csv. - input_csv = pd.read_csv(FLAGS.csv_path) - if FLAGS.num_shards == -1: - num_shards = int(math.sqrt(len(input_csv))) - else: - num_shards = FLAGS.num_shards - # Set up the TFRecordWriters. - basename = os.path.splitext(os.path.basename(FLAGS.csv_path))[0] - shard_names = [ - os.path.join(FLAGS.output_path, f"{basename}-{i:05d}-of-{num_shards:05d}") - for i in range(num_shards) - ] - writers = [tf.io.TFRecordWriter(shard_name) for shard_name in shard_names] - - if "label" in input_csv: - unique_labels = list(set(input_csv["label"].values)) - l_map = {unique_labels[i]: i for i in range(len(unique_labels))} - else: - l_map = None - - if FLAGS.shuffle_csv: - input_csv = input_csv.sample(frac=1) - with _close_on_exit(writers) as writers: - row_count = 0 - for row in input_csv.itertuples(): - index = row[0] - v = row[1] - if os.name == 'posix': - v = v.str.replace('\\', '/') - l = row[2] - row_count += 1 - print("Processing example %d of %d (%d%%) \r" %(row_count, len(input_csv), row_count * 100 / len(input_csv)), end="") - seq_ex = serialize_example(video_path = v, label_name = l,label_map = l_map) - writers[index % len(writers)].write(seq_ex.SerializeToString()) - -if __name__ == "__main__": - app.run(main) diff --git a/spaces/KonradSzafer/HF-QA-Demo/update_space.py b/spaces/KonradSzafer/HF-QA-Demo/update_space.py deleted file mode 100644 index 5e2c9816fb231bedface276b52c9438b9f4be814..0000000000000000000000000000000000000000 --- a/spaces/KonradSzafer/HF-QA-Demo/update_space.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import shutil -import subprocess -from pathlib import Path - - -COMMON_FILES = ['.git', 'README.md', __file__.split('/')[-1]] - - -def remove_old_files(): - filenames = os.listdir('./') - filenames = [f for f in filenames if f not in COMMON_FILES] - for file_path in filenames: - p = Path(file_path) - if p.exists(): - if p.is_file(): - p.unlink() - elif p.is_dir(): - shutil.rmtree(p) - - -def clone_repository(): - repo_url = 'https://github.com/KonradSzafer/hugging-face-qa-bot.git' - subprocess.run(['git', 'clone', repo_url]) - - -def copy_files(): - src = './hugging-face-qa-bot' - for item in COMMON_FILES: - full_path = os.path.join(src, item) - if os.path.isfile(full_path): - os.remove(full_path) - elif os.path.isdir(full_path): - shutil.rmtree(full_path) - for item in Path(src).iterdir(): - shutil.move(str(item), '.') - shutil.rmtree(src) - - -if __name__ == '__main__': - remove_old_files() - clone_repository() - copy_files() diff --git 
a/spaces/KyanChen/RSPrompter/mmdet/datasets/transforms/instaboost.py b/spaces/KyanChen/RSPrompter/mmdet/datasets/transforms/instaboost.py deleted file mode 100644 index 30dc1603643ec8d398bfade95f5ec1c9b8f89c8d..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/datasets/transforms/instaboost.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import numpy as np -from mmcv.transforms import BaseTransform - -from mmdet.registry import TRANSFORMS - - -@TRANSFORMS.register_module() -class InstaBoost(BaseTransform): - r"""Data augmentation method in `InstaBoost: Boosting Instance - Segmentation Via Probability Map Guided Copy-Pasting - `_. - - Refer to https://github.com/GothicAi/Instaboost for implementation details. - - - Required Keys: - - - img (np.uint8) - - instances - - Modified Keys: - - - img (np.uint8) - - instances - - Args: - action_candidate (tuple): Action candidates. "normal", "horizontal", \ - "vertical", "skip" are supported. Defaults to ('normal', \ - 'horizontal', 'skip'). - action_prob (tuple): Corresponding action probabilities. Should be \ - the same length as action_candidate. Defaults to (1, 0, 0). - scale (tuple): (min scale, max scale). Defaults to (0.8, 1.2). - dx (int): The maximum x-axis shift will be (instance width) / dx. - Defaults to 15. - dy (int): The maximum y-axis shift will be (instance height) / dy. - Defaults to 15. - theta (tuple): (min rotation degree, max rotation degree). \ - Defaults to (-1, 1). - color_prob (float): Probability of images for color augmentation. - Defaults to 0.5. - hflag (bool): Whether to use heatmap guided. Defaults to False. - aug_ratio (float): Probability of applying this transformation. \ - Defaults to 0.5. - """ - - def __init__(self, - action_candidate: tuple = ('normal', 'horizontal', 'skip'), - action_prob: tuple = (1, 0, 0), - scale: tuple = (0.8, 1.2), - dx: int = 15, - dy: int = 15, - theta: tuple = (-1, 1), - color_prob: float = 0.5, - hflag: bool = False, - aug_ratio: float = 0.5) -> None: - - import matplotlib - import matplotlib.pyplot as plt - default_backend = plt.get_backend() - - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError( - 'Please run "pip install instaboostfast" ' - 'to install instaboostfast first for instaboost augmentation.') - - # instaboost will modify the default backend - # and cause visualization to fail. 
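For context, this transform is registered through @TRANSFORMS.register_module() above and is normally placed in the train pipeline before LoadAnnotations, so that the 'instances' it modifies are still converted into ground-truth boxes and masks afterwards. An illustrative MMDetection-style pipeline sketch; the scales and probabilities are typical defaults, not requirements of this file:

# Sketch of a train pipeline using the transform; values are illustrative.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='InstaBoost', aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs'),
]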
- matplotlib.use(default_backend) - - self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, - scale, dx, dy, theta, - color_prob, hflag) - self.aug_ratio = aug_ratio - - def _load_anns(self, results: dict) -> Tuple[list, list]: - """Convert raw anns to instaboost expected input format.""" - anns = [] - ignore_anns = [] - for instance in results['instances']: - label = instance['bbox_label'] - bbox = instance['bbox'] - mask = instance['mask'] - x1, y1, x2, y2 = bbox - # assert (x2 - x1) >= 1 and (y2 - y1) >= 1 - bbox = [x1, y1, x2 - x1, y2 - y1] - - if instance['ignore_flag'] == 0: - anns.append({ - 'category_id': label, - 'segmentation': mask, - 'bbox': bbox - }) - else: - # Ignore instances without data augmentation - ignore_anns.append(instance) - return anns, ignore_anns - - def _parse_anns(self, results: dict, anns: list, ignore_anns: list, - img: np.ndarray) -> dict: - """Restore the result of instaboost processing to the original anns - format.""" - instances = [] - for ann in anns: - x1, y1, w, h = ann['bbox'] - # TODO: more essential bug need to be fixed in instaboost - if w <= 0 or h <= 0: - continue - bbox = [x1, y1, x1 + w, y1 + h] - instances.append( - dict( - bbox=bbox, - bbox_label=ann['category_id'], - mask=ann['segmentation'], - ignore_flag=0)) - - instances.extend(ignore_anns) - results['img'] = img - results['instances'] = instances - return results - - def transform(self, results) -> dict: - """The transform function.""" - img = results['img'] - ori_type = img.dtype - if 'instances' not in results or len(results['instances']) == 0: - return results - - anns, ignore_anns = self._load_anns(results) - if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError('Please run "pip install instaboostfast" ' - 'to install instaboostfast first.') - anns, img = instaboost.get_new_data( - anns, img.astype(np.uint8), self.cfg, background=None) - - results = self._parse_anns(results, anns, ignore_anns, - img.astype(ori_type)) - return results - - def __repr__(self) -> str: - repr_str = self.__class__.__name__ - repr_str += f'(aug_ratio={self.aug_ratio})' - return repr_str diff --git a/spaces/L0SG/BigVGAN/env.py b/spaces/L0SG/BigVGAN/env.py deleted file mode 100644 index b8be238d4db710c8c9a338d336baea0138f18d1f..0000000000000000000000000000000000000000 --- a/spaces/L0SG/BigVGAN/env.py +++ /dev/null @@ -1,18 +0,0 @@ -# Adapted from https://github.com/jik876/hifi-gan under the MIT license. -# LICENSE is in incl_licenses directory. 
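The `_load_anns`/`_parse_anns` pair in the InstaBoost transform above only reshuffles the box encoding: the pipeline stores `[x1, y1, x2, y2]`, while InstaBoost consumes COCO-style `[x, y, w, h]`, and boxes that come back degenerate are dropped. A minimal standalone sketch of that round-trip (the helper names and numbers below are illustrative, not part of the original file):

```python
# Illustrative sketch of the bbox round-trip performed by InstaBoost._load_anns
# and _parse_anns; not part of the original transform.

def xyxy_to_xywh(bbox):
    """[x1, y1, x2, y2] -> [x, y, w, h], the COCO-style layout InstaBoost expects."""
    x1, y1, x2, y2 = bbox
    return [x1, y1, x2 - x1, y2 - y1]

def xywh_to_xyxy(bbox):
    """[x, y, w, h] -> [x1, y1, x2, y2], the layout written back into results['instances']."""
    x, y, w, h = bbox
    return [x, y, x + w, y + h]

if __name__ == "__main__":
    box = [10.0, 20.0, 50.0, 80.0]            # x1, y1, x2, y2
    coco_box = xyxy_to_xywh(box)              # [10.0, 20.0, 40.0, 60.0]
    assert xywh_to_xyxy(coco_box) == box      # lossless for well-formed boxes
    # _parse_anns skips boxes that come back with w <= 0 or h <= 0,
    # so only positive widths/heights survive the augmentation.
    assert all(v > 0 for v in coco_box[2:])
```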
- -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/spaces/Li2024/chatai/app.py b/spaces/Li2024/chatai/app.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/recog_models/crnn.py b/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/recog_models/crnn.py deleted file mode 100644 index b316c6a8a7f4f79c0cff3062583391b746f3cad8..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/recog_models/crnn.py +++ /dev/null @@ -1,12 +0,0 @@ -label_convertor = dict( - type='CTCConvertor', dict_type='DICT36', with_unknown=False, lower=True) - -model = dict( - type='CRNNNet', - preprocessor=None, - backbone=dict(type='VeryDeepVgg', leaky_relu=False, input_channels=1), - encoder=None, - decoder=dict(type='CRNNDecoder', in_channels=512, rnn_flag=True), - loss=dict(type='CTCLoss'), - label_convertor=label_convertor, - pretrained=None) diff --git a/spaces/MLIFY/Chatter/README.md b/spaces/MLIFY/Chatter/README.md deleted file mode 100644 index 58cf4a463e0bf944c4622fe067cbf1c5933939b0..0000000000000000000000000000000000000000 --- a/spaces/MLIFY/Chatter/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Chatter -emoji: 💬 -colorFrom: indigo -colorTo: red -sdk: static -pinned: false -license: apache-2.0 ---- -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_utils.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_utils.py deleted file mode 100644 index 9db52388361fd384d41e3e37c6a2affdb45444df..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_utils.py +++ /dev/null @@ -1,21 +0,0 @@ -import torch -import numpy as np -import random -import onnxruntime as ort -def set_random_seed(seed=0): - ort.set_seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - random.seed(seed) - np.random.seed(seed) - -def runonnx(model_path, **kwargs): - #如使用onnx-gpu,则providers三选一 - #ort_session = ort.InferenceSession(model_path, providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']) - ort_session = ort.InferenceSession(model_path) - outputs = ort_session.run( - None, - kwargs - ) - return outputs \ No newline at end of file diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/data_utils.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/data_utils.py deleted file mode 100644 index e9246c6c8f2ff3c37a7f8529ea1593c7f80f887e..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/data_utils.py +++ /dev/null @@ -1,393 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import text_to_sequence, cleaned_text_to_sequence - - -class TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and 
converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_and_text) - self._filter() - - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - audiopath, text = audiopath_and_text[0], audiopath_and_text[1] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - return (text, spec, wav) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = 
torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths - - -"""Multi speaker version""" -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - for audiopath, sid, text in self.audiopaths_sid_text: - audiopath = "E:/uma_voice/" + audiopath - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_sid_text_new.append([audiopath, sid, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - sid = self.get_sid(sid) - return (text, spec, wav, sid) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, 
audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
- """ - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i+1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/transformer.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/transformer.py deleted file mode 100644 index f1a2812f613cc55b1d0b3e3e1d0c84a760d1fb87..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/transformer.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
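The `DistributedBucketSampler` above bins utterances by spectrogram length and then tops up each bin so it divides evenly by `num_replicas * batch_size`. A self-contained sketch of that bucketing arithmetic, using plain Python lists and toy numbers (the helper names are mine; no torch or distributed setup is involved):

```python
import bisect

def assign_buckets(lengths, boundaries):
    """Same rule as _bisect: a sample lands in bucket k iff boundaries[k] < length <= boundaries[k+1]."""
    buckets = [[] for _ in range(len(boundaries) - 1)]
    for idx, length in enumerate(lengths):
        k = bisect.bisect_left(boundaries, length) - 1
        if 0 <= k < len(buckets):
            buckets[k].append(idx)          # out-of-range lengths are simply discarded
    return buckets

def pad_bucket(bucket, num_replicas, batch_size):
    """Repeat indices (as __iter__ does) until the bucket length is a multiple of the global batch."""
    total = num_replicas * batch_size
    rem = (total - (len(bucket) % total)) % total
    padded = list(bucket)
    for i in range(rem):
        padded.append(bucket[i % len(bucket)])
    return padded

if __name__ == "__main__":
    lengths = [90, 130, 210, 260, 310, 140]   # toy spectrogram lengths
    boundaries = [32, 300, 400, 500]
    buckets = [b for b in assign_buckets(lengths, boundaries) if b]
    padded = [pad_bucket(b, num_replicas=2, batch_size=2) for b in buckets]
    assert all(len(b) % 4 == 0 for b in padded)   # every bucket now splits evenly across ranks
```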
- -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import Tensor, nn - -import math -from typing import Tuple, Type - -from .common import MLPBlock - - -class TwoWayTransformer(nn.Module): - def __init__( - self, - depth: int, - embedding_dim: int, - num_heads: int, - mlp_dim: int, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - ) -> None: - """ - A transformer decoder that attends to an input image using - queries whose positional embedding is supplied. - - Args: - depth (int): number of layers in the transformer - embedding_dim (int): the channel dimension for the input embeddings - num_heads (int): the number of heads for multihead attention. Must - divide embedding_dim - mlp_dim (int): the channel dimension internal to the MLP block - activation (nn.Module): the activation to use in the MLP block - """ - super().__init__() - self.depth = depth - self.embedding_dim = embedding_dim - self.num_heads = num_heads - self.mlp_dim = mlp_dim - self.layers = nn.ModuleList() - - for i in range(depth): - self.layers.append( - TwoWayAttentionBlock( - embedding_dim=embedding_dim, - num_heads=num_heads, - mlp_dim=mlp_dim, - activation=activation, - attention_downsample_rate=attention_downsample_rate, - skip_first_layer_pe=(i == 0), - ) - ) - - self.final_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm_final_attn = nn.LayerNorm(embedding_dim) - - def forward( - self, - image_embedding: Tensor, - image_pe: Tensor, - point_embedding: Tensor, - ) -> Tuple[Tensor, Tensor]: - """ - Args: - image_embedding (torch.Tensor): image to attend to. Should be shape - B x embedding_dim x h x w for any h and w. - image_pe (torch.Tensor): the positional encoding to add to the image. Must - have the same shape as image_embedding. - point_embedding (torch.Tensor): the embedding to add to the query points. - Must have shape B x N_points x embedding_dim for any N_points. - - Returns: - torch.Tensor: the processed point_embedding - torch.Tensor: the processed image_embedding - """ - # BxCxHxW -> BxHWxC == B x N_image_tokens x C - bs, c, h, w = image_embedding.shape - image_embedding = image_embedding.flatten(2).permute(0, 2, 1) - image_pe = image_pe.flatten(2).permute(0, 2, 1) - - # Prepare queries - queries = point_embedding - keys = image_embedding - - # Apply transformer blocks and final layernorm - for layer in self.layers: - queries, keys = layer( - queries=queries, - keys=keys, - query_pe=point_embedding, - key_pe=image_pe, - ) - - # Apply the final attenion layer from the points to the image - q = queries + point_embedding - k = keys + image_pe - attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm_final_attn(queries) - - return queries, keys - - -class TwoWayAttentionBlock(nn.Module): - def __init__( - self, - embedding_dim: int, - num_heads: int, - mlp_dim: int = 2048, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - skip_first_layer_pe: bool = False, - ) -> None: - """ - A transformer block with four layers: (1) self-attention of sparse - inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp - block on sparse inputs, and (4) cross attention of dense inputs to sparse - inputs. 
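These blocks alternate between the sparse prompt tokens and the dense image tokens, and `TwoWayTransformer.forward` above flattens the image embedding so both sides share a `batch x tokens x channels` layout before any attention runs. A short shape check with illustrative sizes (the numbers are not taken from the original configuration):

```python
import torch

B, C, H, W = 2, 256, 64, 64      # illustrative sizes only
N_points = 5

image_embedding = torch.randn(B, C, H, W)
point_embedding = torch.randn(B, N_points, C)

# BxCxHxW -> BxHWxC, i.e. B x N_image_tokens x C, as done in TwoWayTransformer.forward
image_tokens = image_embedding.flatten(2).permute(0, 2, 1)
assert image_tokens.shape == (B, H * W, C)

# queries start as the prompt tokens, keys as the flattened image tokens
queries, keys = point_embedding, image_tokens
assert queries.shape == (B, N_points, C) and keys.shape == (B, H * W, C)
```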
- - Arguments: - embedding_dim (int): the channel dimension of the embeddings - num_heads (int): the number of heads in the attention layers - mlp_dim (int): the hidden dimension of the mlp block - activation (nn.Module): the activation of the mlp block - skip_first_layer_pe (bool): skip the PE on the first layer - """ - super().__init__() - self.self_attn = Attention(embedding_dim, num_heads) - self.norm1 = nn.LayerNorm(embedding_dim) - - self.cross_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm2 = nn.LayerNorm(embedding_dim) - - self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) - self.norm3 = nn.LayerNorm(embedding_dim) - - self.norm4 = nn.LayerNorm(embedding_dim) - self.cross_attn_image_to_token = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - - self.skip_first_layer_pe = skip_first_layer_pe - - def forward( - self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor - ) -> Tuple[Tensor, Tensor]: - # Self attention block - if self.skip_first_layer_pe: - queries = self.self_attn(q=queries, k=queries, v=queries) - else: - q = queries + query_pe - attn_out = self.self_attn(q=q, k=q, v=queries) - queries = queries + attn_out - queries = self.norm1(queries) - - # Cross attention block, tokens attending to image embedding - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm2(queries) - - # MLP block - mlp_out = self.mlp(queries) - queries = queries + mlp_out - queries = self.norm3(queries) - - # Cross attention block, image embedding attending to tokens - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) - keys = keys + attn_out - keys = self.norm4(keys) - - return queries, keys - - -class Attention(nn.Module): - """ - An attention layer that allows for downscaling the size of the embedding - after projection to queries, keys, and values. - """ - - def __init__( - self, - embedding_dim: int, - num_heads: int, - downsample_rate: int = 1, - ) -> None: - super().__init__() - self.embedding_dim = embedding_dim - self.internal_dim = embedding_dim // downsample_rate - self.num_heads = num_heads - assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
- - self.q_proj = nn.Linear(embedding_dim, self.internal_dim) - self.k_proj = nn.Linear(embedding_dim, self.internal_dim) - self.v_proj = nn.Linear(embedding_dim, self.internal_dim) - self.out_proj = nn.Linear(self.internal_dim, embedding_dim) - - def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: - b, n, c = x.shape - x = x.reshape(b, n, num_heads, c // num_heads) - return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head - - def _recombine_heads(self, x: Tensor) -> Tensor: - b, n_heads, n_tokens, c_per_head = x.shape - x = x.transpose(1, 2) - return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C - - def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: - # Input projections - q = self.q_proj(q) - k = self.k_proj(k) - v = self.v_proj(v) - - # Separate into heads - q = self._separate_heads(q, self.num_heads) - k = self._separate_heads(k, self.num_heads) - v = self._separate_heads(v, self.num_heads) - - # Attention - _, _, _, c_per_head = q.shape - attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens - attn = attn / math.sqrt(c_per_head) - attn = torch.softmax(attn, dim=-1) - - # Get output - out = attn @ v - out = self._recombine_heads(out) - out = self.out_proj(out) - - return out diff --git a/spaces/Markfm/webui2/README.md b/spaces/Markfm/webui2/README.md deleted file mode 100644 index f91194f7d6b1b5b58905387d8e785f781e3e0049..0000000000000000000000000000000000000000 --- a/spaces/Markfm/webui2/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Webui -emoji: 🚧 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: noteh65036/webui ---- diff --git a/spaces/MatzeFix/openai-whisper-large-v2/app.py b/spaces/MatzeFix/openai-whisper-large-v2/app.py deleted file mode 100644 index dbb9d9791843686edb94d5f60b142d78896fef12..0000000000000000000000000000000000000000 --- a/spaces/MatzeFix/openai-whisper-large-v2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/openai/whisper-large-v2").launch() \ No newline at end of file diff --git a/spaces/MetaWabbit/Auto-GPT/scripts/check_requirements.py b/spaces/MetaWabbit/Auto-GPT/scripts/check_requirements.py deleted file mode 100644 index e4eab024a6280c0d54110c69b2e03de639325fa6..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/scripts/check_requirements.py +++ /dev/null @@ -1,32 +0,0 @@ -import sys - -import pkg_resources - - -def main(): - requirements_file = sys.argv[1] - with open(requirements_file, "r") as f: - required_packages = [ - line.strip().split("#")[0].strip() for line in f.readlines() - ] - - installed_packages = [package.key for package in pkg_resources.working_set] - - missing_packages = [] - for package in required_packages: - if not package: # Skip empty lines - continue - package_name = package.strip().split("==")[0] - if package_name.lower() not in installed_packages: - missing_packages.append(package_name) - - if missing_packages: - print("Missing packages:") - print(", ".join(missing_packages)) - sys.exit(1) - else: - print("All packages are installed.") - - -if __name__ == "__main__": - main() diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/__init__.py deleted file mode 100644 index 54a9ea7f02824c517d2529ce3ae0ff4a607ca70f..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 
OpenMMLab. All rights reserved. -from .dataset_wrapper import ConcatDataset -from .icdar_dataset import IcdarDataset -from .ocr_dataset import OCRDataset -from .recog_lmdb_dataset import RecogLMDBDataset -from .recog_text_dataset import RecogTextDataset -from .samplers import * # NOQA -from .transforms import * # NOQA -from .wildreceipt_dataset import WildReceiptDataset - -__all__ = [ - 'IcdarDataset', 'OCRDataset', 'RecogLMDBDataset', 'RecogTextDataset', - 'WildReceiptDataset', 'ConcatDataset' -] diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/dumpers/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/dumpers/__init__.py deleted file mode 100644 index ed3dda486b568ea5b4c7f48100b2c32c0b8ec987..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/dumpers/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base import BaseDumper -from .json_dumper import JsonDumper -from .lmdb_dumper import TextRecogLMDBDumper -from .wild_receipt_openset_dumper import WildreceiptOpensetDumper - -__all__ = [ - 'BaseDumper', 'JsonDumper', 'WildreceiptOpensetDumper', - 'TextRecogLMDBDumper' -] diff --git a/spaces/NATSpeech/PortaSpeech/inference/tts/fs.py b/spaces/NATSpeech/PortaSpeech/inference/tts/fs.py deleted file mode 100644 index ee7beb321b699e92e3ad72e9959a093ce65deb12..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/inference/tts/fs.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch -from inference.tts.base_tts_infer import BaseTTSInfer -from modules.tts.fs import FastSpeech -from utils.commons.ckpt_utils import load_ckpt -from utils.commons.hparams import hparams - - -class FastSpeechInfer(BaseTTSInfer): - def build_model(self): - dict_size = len(self.ph_encoder) - model = FastSpeech(dict_size, self.hparams) - model.eval() - load_ckpt(model, hparams['work_dir'], 'model') - return model - - def forward_model(self, inp): - sample = self.input_to_batch(inp) - txt_tokens = sample['txt_tokens'] # [B, T_t] - spk_id = sample.get('spk_ids') - with torch.no_grad(): - output = self.model(txt_tokens, spk_id=spk_id, infer=True) - mel_out = output['mel_out'] - wav_out = self.run_vocoder(mel_out) - wav_out = wav_out.cpu().numpy() - return wav_out[0] - - -if __name__ == '__main__': - FastSpeechInfer.example_run() diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/evaluation/coco_utils.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/evaluation/coco_utils.py deleted file mode 100644 index 8155d1fbb89ac143eb7cc03457a6645a5b5ab505..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/evaluation/coco_utils.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Util functions related to pycocotools and COCO eval.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy -import json - -from absl import logging -import numpy as np -from PIL import Image -from pycocotools import coco -from pycocotools import mask as mask_api -import six -import tensorflow as tf - -from official.vision.detection.dataloader import tf_example_decoder -from official.vision.detection.utils import box_utils -from official.vision.detection.utils import mask_utils - - -class COCOWrapper(coco.COCO): - """COCO wrapper class. - - This class wraps COCO API object, which provides the following additional - functionalities: - 1. Support string type image id. - 2. Support loading the groundtruth dataset using the external annotation - dictionary. - 3. Support loading the prediction results using the external annotation - dictionary. - """ - - def __init__(self, eval_type='box', annotation_file=None, gt_dataset=None): - """Instantiates a COCO-style API object. - - Args: - eval_type: either 'box' or 'mask'. - annotation_file: a JSON file that stores annotations of the eval dataset. - This is required if `gt_dataset` is not provided. - gt_dataset: the groundtruth eval datatset in COCO API format. - """ - if ((annotation_file and gt_dataset) or - ((not annotation_file) and (not gt_dataset))): - raise ValueError('One and only one of `annotation_file` and `gt_dataset` ' - 'needs to be specified.') - - if eval_type not in ['box', 'mask']: - raise ValueError('The `eval_type` can only be either `box` or `mask`.') - - coco.COCO.__init__(self, annotation_file=annotation_file) - self._eval_type = eval_type - if gt_dataset: - self.dataset = gt_dataset - self.createIndex() - - def loadRes(self, predictions): - """Loads result file and return a result api object. - - Args: - predictions: a list of dictionary each representing an annotation in COCO - format. The required fields are `image_id`, `category_id`, `score`, - `bbox`, `segmentation`. - - Returns: - res: result COCO api object. - - Raises: - ValueError: if the set of image id from predctions is not the subset of - the set of image id of the groundtruth dataset. - """ - res = coco.COCO() - res.dataset['images'] = copy.deepcopy(self.dataset['images']) - res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) - - image_ids = [ann['image_id'] for ann in predictions] - if set(image_ids) != (set(image_ids) & set(self.getImgIds())): - raise ValueError('Results do not correspond to the current dataset!') - for ann in predictions: - x1, x2, y1, y2 = [ann['bbox'][0], ann['bbox'][0] + ann['bbox'][2], - ann['bbox'][1], ann['bbox'][1] + ann['bbox'][3]] - if self._eval_type == 'box': - ann['area'] = ann['bbox'][2] * ann['bbox'][3] - ann['segmentation'] = [ - [x1, y1, x1, y2, x2, y2, x2, y1]] - elif self._eval_type == 'mask': - ann['area'] = mask_api.area(ann['segmentation']) - - res.dataset['annotations'] = copy.deepcopy(predictions) - res.createIndex() - return res - - -def convert_predictions_to_coco_annotations(predictions): - """Converts a batch of predictions to annotations in COCO format. - - Args: - predictions: a dictionary of lists of numpy arrays including the following - fields. K below denotes the maximum number of instances per image. - Required fields: - - source_id: a list of numpy arrays of int or string of shape - [batch_size]. 
- - num_detections: a list of numpy arrays of int of shape [batch_size]. - - detection_boxes: a list of numpy arrays of float of shape - [batch_size, K, 4], where coordinates are in the original image - space (not the scaled image space). - - detection_classes: a list of numpy arrays of int of shape - [batch_size, K]. - - detection_scores: a list of numpy arrays of float of shape - [batch_size, K]. - Optional fields: - - detection_masks: a list of numpy arrays of float of shape - [batch_size, K, mask_height, mask_width]. - - Returns: - coco_predictions: prediction in COCO annotation format. - """ - coco_predictions = [] - num_batches = len(predictions['source_id']) - batch_size = predictions['source_id'][0].shape[0] - max_num_detections = predictions['detection_classes'][0].shape[1] - use_outer_box = 'detection_outer_boxes' in predictions - for i in range(num_batches): - predictions['detection_boxes'][i] = box_utils.yxyx_to_xywh( - predictions['detection_boxes'][i]) - if use_outer_box: - predictions['detection_outer_boxes'][i] = box_utils.yxyx_to_xywh( - predictions['detection_outer_boxes'][i]) - mask_boxes = predictions['detection_outer_boxes'] - else: - mask_boxes = predictions['detection_boxes'] - - for j in range(batch_size): - if 'detection_masks' in predictions: - image_masks = mask_utils.paste_instance_masks( - predictions['detection_masks'][i][j], - mask_boxes[i][j], - int(predictions['image_info'][i][j, 0, 0]), - int(predictions['image_info'][i][j, 0, 1])) - binary_masks = (image_masks > 0.0).astype(np.uint8) - encoded_masks = [ - mask_api.encode(np.asfortranarray(binary_mask)) - for binary_mask in list(binary_masks)] - for k in range(max_num_detections): - ann = {} - ann['image_id'] = predictions['source_id'][i][j] - ann['category_id'] = predictions['detection_classes'][i][j, k] - ann['bbox'] = predictions['detection_boxes'][i][j, k] - ann['score'] = predictions['detection_scores'][i][j, k] - if 'detection_masks' in predictions: - ann['segmentation'] = encoded_masks[k] - coco_predictions.append(ann) - - for i, ann in enumerate(coco_predictions): - ann['id'] = i + 1 - - return coco_predictions - - -def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None): - """Converts groundtruths to the dataset in COCO format. - - Args: - groundtruths: a dictionary of numpy arrays including the fields below. - Note that each element in the list represent the number for a single - example without batch dimension. K below denotes the actual number of - instances for each image. - Required fields: - - source_id: a list of numpy arrays of int or string of shape - [batch_size]. - - height: a list of numpy arrays of int of shape [batch_size]. - - width: a list of numpy arrays of int of shape [batch_size]. - - num_detections: a list of numpy arrays of int of shape [batch_size]. - - boxes: a list of numpy arrays of float of shape [batch_size, K, 4], - where coordinates are in the original image space (not the - normalized coordinates). - - classes: a list of numpy arrays of int of shape [batch_size, K]. - Optional fields: - - is_crowds: a list of numpy arrays of int of shape [batch_size, K]. If - th field is absent, it is assumed that this instance is not crowd. - - areas: a list of numy arrays of float of shape [batch_size, K]. If the - field is absent, the area is calculated using either boxes or - masks depending on which one is available. 
- - masks: a list of numpy arrays of string of shape [batch_size, K], - label_map: (optional) a dictionary that defines items from the category id - to the category name. If `None`, collect the category mappping from the - `groundtruths`. - - Returns: - coco_groundtruths: the groundtruth dataset in COCO format. - """ - source_ids = np.concatenate(groundtruths['source_id'], axis=0) - heights = np.concatenate(groundtruths['height'], axis=0) - widths = np.concatenate(groundtruths['width'], axis=0) - gt_images = [{'id': int(i), 'height': int(h), 'width': int(w)} for i, h, w - in zip(source_ids, heights, widths)] - - gt_annotations = [] - num_batches = len(groundtruths['source_id']) - batch_size = groundtruths['source_id'][0].shape[0] - for i in range(num_batches): - for j in range(batch_size): - num_instances = groundtruths['num_detections'][i][j] - for k in range(num_instances): - ann = {} - ann['image_id'] = int(groundtruths['source_id'][i][j]) - if 'is_crowds' in groundtruths: - ann['iscrowd'] = int(groundtruths['is_crowds'][i][j, k]) - else: - ann['iscrowd'] = 0 - ann['category_id'] = int(groundtruths['classes'][i][j, k]) - boxes = groundtruths['boxes'][i] - ann['bbox'] = [ - float(boxes[j, k, 1]), - float(boxes[j, k, 0]), - float(boxes[j, k, 3] - boxes[j, k, 1]), - float(boxes[j, k, 2] - boxes[j, k, 0])] - if 'areas' in groundtruths: - ann['area'] = float(groundtruths['areas'][i][j, k]) - else: - ann['area'] = float( - (boxes[j, k, 3] - boxes[j, k, 1]) * - (boxes[j, k, 2] - boxes[j, k, 0])) - if 'masks' in groundtruths: - mask = Image.open(six.StringIO(groundtruths['masks'][i][j, k])) - width, height = mask.size - np_mask = ( - np.array(mask.getdata()).reshape(height, width).astype(np.uint8)) - np_mask[np_mask > 0] = 255 - encoded_mask = mask_api.encode(np.asfortranarray(np_mask)) - ann['segmentation'] = encoded_mask - if 'areas' not in groundtruths: - ann['area'] = mask_api.area(encoded_mask) - gt_annotations.append(ann) - - for i, ann in enumerate(gt_annotations): - ann['id'] = i + 1 - - if label_map: - gt_categories = [{'id': i, 'name': label_map[i]} for i in label_map] - else: - category_ids = [gt['category_id'] for gt in gt_annotations] - gt_categories = [{'id': i} for i in set(category_ids)] - - gt_dataset = { - 'images': gt_images, - 'categories': gt_categories, - 'annotations': copy.deepcopy(gt_annotations), - } - return gt_dataset - - -class COCOGroundtruthGenerator(object): - """Generates the groundtruth annotations from a single example.""" - - def __init__(self, file_pattern, num_examples, include_mask): - self._file_pattern = file_pattern - self._num_examples = num_examples - self._include_mask = include_mask - self._dataset_fn = tf.data.TFRecordDataset - - def _parse_single_example(self, example): - """Parses a single serialized tf.Example proto. - - Args: - example: a serialized tf.Example proto string. - - Returns: - A dictionary of groundtruth with the following fields: - source_id: a scalar tensor of int64 representing the image source_id. - height: a scalar tensor of int64 representing the image height. - width: a scalar tensor of int64 representing the image width. - boxes: a float tensor of shape [K, 4], representing the groundtruth - boxes in absolute coordinates with respect to the original image size. - classes: a int64 tensor of shape [K], representing the class labels of - each instances. - is_crowds: a bool tensor of shape [K], indicating whether the instance - is crowd. - areas: a float tensor of shape [K], indicating the area of each - instance. 
- masks: a string tensor of shape [K], containing the bytes of the png - mask of each instance. - """ - decoder = tf_example_decoder.TfExampleDecoder( - include_mask=self._include_mask) - decoded_tensors = decoder.decode(example) - - image = decoded_tensors['image'] - image_size = tf.shape(image)[0:2] - boxes = box_utils.denormalize_boxes( - decoded_tensors['groundtruth_boxes'], image_size) - groundtruths = { - 'source_id': tf.string_to_number( - decoded_tensors['source_id'], out_type=tf.int64), - 'height': decoded_tensors['height'], - 'width': decoded_tensors['width'], - 'num_detections': tf.shape(decoded_tensors['groundtruth_classes'])[0], - 'boxes': boxes, - 'classes': decoded_tensors['groundtruth_classes'], - 'is_crowds': decoded_tensors['groundtruth_is_crowd'], - 'areas': decoded_tensors['groundtruth_area'], - } - if self._include_mask: - groundtruths.update({ - 'masks': decoded_tensors['groundtruth_instance_masks_png'], - }) - return groundtruths - - def _build_pipeline(self): - """Builds data pipeline to generate groundtruth annotations.""" - dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False) - dataset = dataset.apply( - tf.data.experimental.parallel_interleave( - lambda filename: self._dataset_fn(filename).prefetch(1), - cycle_length=32, - sloppy=False)) - dataset = dataset.map(self._parse_single_example, num_parallel_calls=64) - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - dataset = dataset.batch(1, drop_remainder=False) - return dataset - - def __call__(self): - with tf.Graph().as_default(): - dataset = self._build_pipeline() - groundtruth = dataset.make_one_shot_iterator().get_next() - - with tf.Session() as sess: - for _ in range(self._num_examples): - groundtruth_result = sess.run(groundtruth) - yield groundtruth_result - - -def scan_and_generator_annotation_file(file_pattern, - num_samples, - include_mask, - annotation_file): - """Scans and generate the COCO-style annotation JSON file given a dataset.""" - groundtruth_generator = COCOGroundtruthGenerator( - file_pattern, num_samples, include_mask) - generate_annotation_file(groundtruth_generator, annotation_file) - - -def generate_annotation_file(groundtruth_generator, - annotation_file): - """Generates COCO-style annotation JSON file given a groundtruth generator.""" - groundtruths = {} - logging.info('Loading groundtruth annotations from dataset to memory...') - for groundtruth in groundtruth_generator(): - for k, v in six.iteritems(groundtruth): - if k not in groundtruths: - groundtruths[k] = [v] - else: - groundtruths[k].append(v) - gt_dataset = convert_groundtruths_to_coco_dataset(groundtruths) - - logging.info('Saving groundtruth annotations to the JSON file...') - with tf.io.gfile.GFile(annotation_file, 'w') as f: - f.write(json.dumps(gt_dataset)) - logging.info('Done saving the JSON file...') diff --git a/spaces/NMEX/vits-uma-genshin-honkai/text/__init__.py b/spaces/NMEX/vits-uma-genshin-honkai/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/NMEX/vits-uma-genshin-honkai/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a 
sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/NN520/AI/src/components/turn-counter.tsx b/spaces/NN520/AI/src/components/turn-counter.tsx deleted file mode 100644 index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000 --- a/spaces/NN520/AI/src/components/turn-counter.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import React from 'react' -import { Throttling } from '@/lib/bots/bing/types' - -export interface TurnCounterProps { - throttling?: Throttling -} - -export function TurnCounter({ throttling }: TurnCounterProps) { - if (!throttling) { - return null - } - - return ( -
    -
- {throttling.numUserMessagesInConversation} - - {throttling.maxNumUserMessagesInConversation} -
    -
    -
    - ) -} diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh deleted file mode 100644 index e3efeb21d302ef8d9eae8f1d4b06434c593705f6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -echo 'Cloning Moses github repository (for tokenization scripts)...' -git clone https://github.com/moses-smt/mosesdecoder.git - -SCRIPTS=mosesdecoder/scripts -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -CLEAN=$SCRIPTS/training/clean-corpus-n.perl -REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl - -URLS=( - "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz" - "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz" - "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz" - "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz" - "http://data.statmt.org/wmt17/translation-task/dev.tgz" - "http://statmt.org/wmt14/test-full.tgz" -) -CORPORA=( - "training/europarl-v7.de-en" - "commoncrawl.de-en" - "training-parallel-nc-v13/news-commentary-v13.de-en" - "rapid2016.de-en" -) - -if [ ! -d "$SCRIPTS" ]; then - echo "Please set SCRIPTS variable correctly to point to Moses scripts." - exit -fi - -src=en -tgt=de -lang=en-de -prep=wmt18_en_de -tmp=$prep/tmp -orig=orig -dev=dev/newstest2012 -codes=32000 -bpe=bpe.32k - -mkdir -p $orig $tmp $prep $bpe - -cd $orig - -for ((i=0;i<${#URLS[@]};++i)); do - url=${URLS[i]} - file=$(basename $url) - if [ -f $file ]; then - echo "$file already exists, skipping download" - else - wget "$url" - if [ -f $file ]; then - echo "$url successfully downloaded." - else - echo "$url not successfully downloaded." - exit 1 - fi - if [ ${file: -4} == ".tgz" ]; then - tar zxvf $file - elif [ ${file: -4} == ".tar" ]; then - tar xvf $file - fi - fi -done -cd .. - -echo "pre-processing train data..." -for l in $src $tgt; do - rm -rf $tmp/train.tags.$lang.tok.$l - for f in "${CORPORA[@]}"; do - cat $orig/$f.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/train.tags.$lang.tok.$l - done -done - -echo "pre-processing test data..." -for l in $src $tgt; do - if [ "$l" == "$src" ]; then - t="src" - else - t="ref" - fi - grep '\s*//g' | \ - sed -e 's/\s*<\/seg>\s*//g' | \ - sed -e "s/\’/\'/g" | \ - perl $TOKENIZER -threads 8 -l $l -no-escape > $tmp/test.$l - echo "" -done - -# apply length filtering before BPE -perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 100 - -# use newstest2012 for valid -echo "pre-processing valid data..." 
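The `clean-corpus-n.perl` step above keeps a sentence pair only if both sides have between 1 and 100 tokens and their length ratio stays within 1.5. A rough Python equivalent of that filter (illustrative only, not the Moses implementation):

```python
# Rough Python equivalent of: perl $CLEAN -ratio 1.5 ... 1 100
# (keep pairs with 1..100 tokens per side and a length ratio of at most 1.5).
# Illustrative sketch only, not the Moses script.

def keep_pair(src_line: str, tgt_line: str,
              min_len: int = 1, max_len: int = 100, ratio: float = 1.5) -> bool:
    n_src, n_tgt = len(src_line.split()), len(tgt_line.split())
    if not (min_len <= n_src <= max_len and min_len <= n_tgt <= max_len):
        return False
    return max(n_src, n_tgt) <= ratio * min(n_src, n_tgt)

if __name__ == "__main__":
    assert keep_pair("ein kleiner Test .", "a small test .")
    assert not keep_pair("kurz", "a much , much longer target sentence .")
```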
-for l in $src $tgt; do - rm -rf $tmp/valid.$l - cat $orig/$dev.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/valid.$l -done - -mkdir output -mv $tmp/{train,valid,test}.{$src,$tgt} output - -#BPE -git clone https://github.com/glample/fastBPE.git -pushd fastBPE -g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast -popd -fastBPE/fast learnbpe $codes output/train.$src output/train.$tgt > $bpe/codes -for split in {train,valid,test}; do for lang in {en,de}; do fastBPE/fast applybpe $bpe/$split.$lang output/$split.$lang $bpe/codes; done; done diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/linformer/linformer_src/models/linformer_roberta.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/linformer/linformer_src/models/linformer_roberta.py deleted file mode 100644 index b7bdbb11057d0ba791c2f8c7fb1e77507c90172e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/linformer/linformer_src/models/linformer_roberta.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Linformer: Self-Attention with Linear Complexity -""" - -import logging - -import torch -from fairseq import utils -from fairseq.models import register_model, register_model_architecture -from fairseq.models.roberta import ( - init_bert_params, - roberta_base_architecture, - roberta_large_architecture, - RobertaEncoder, - RobertaModel, -) -from fairseq.utils import safe_hasattr - -from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder - - -logger = logging.getLogger(__name__) - - -@register_model("linformer_roberta") -class LinformerModel(RobertaModel): - @staticmethod - def add_args(parser): - RobertaModel.add_args(parser) - - # add args for Linformer - parser.add_argument( - "--compressed", type=int, help="compressed ratio of sequence length" - ) - parser.add_argument( - "--shared-kv-compressed", - type=int, - help="share compressed matrix between k and v, in each layer", - ) - parser.add_argument( - "--shared-layer-kv-compressed", - type=int, - help="share compressed matrix between k and v and across all layers", - ) - parser.add_argument( - "--freeze-compress", - type=int, - help="freeze the parameters in compressed layer", - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present - base_architecture(args) - - if not safe_hasattr(args, "max_positions"): - args.max_positions = args.tokens_per_sample - - encoder = LinformerEncoder(args, task.source_dictionary) - return cls(args, encoder) - - -class LinformerEncoder(RobertaEncoder): - """Linformer encoder.""" - - def __init__(self, args, dictionary): - super().__init__(args, dictionary) - self.register_buffer("version", torch.tensor(2)) - - def build_encoder(self, args, dictionary, embed_tokens): - encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens) - encoder.apply(init_bert_params) - return encoder - - def upgrade_state_dict_named(self, state_dict, name): - super().upgrade_state_dict_named(state_dict, name) - prefix = name + "." 
if name != "" else "" - - # some old checkpoints had weight sharing implemented incorrectly - # (note: this was correct in the original paper code) - if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2: - state_dict[f"{prefix}version"] = torch.tensor(1) - # check if input embeddings and output embeddings were tied - if not torch.allclose( - state_dict[f"{prefix}sentence_encoder.embed_tokens.weight"], - state_dict[f"{prefix}lm_head.weight"], - ): - # they weren't tied, re-init the LM head without weight sharing - self.lm_head = self.build_lm_head( - embed_dim=self.args.encoder_embed_dim, - output_dim=len(self.dictionary), - activation_fn=self.args.activation_fn, - weight=None, # don't share weights - ) - - -@register_model_architecture("linformer_roberta", "linformer_roberta") -def base_architecture(args): - args.compressed = getattr(args, "compressed", 4) - args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0) - args.shared_layer_kv_compressed = getattr(args, "shared_layer_kv_compressed", 0) - args.freeze_compress = getattr(args, "freeze_compress", 0) - roberta_base_architecture(args) - - -@register_model_architecture("linformer_roberta", "linformer_roberta_base") -def linformer_roberta_base_architecture(args): - base_architecture(args) - - -@register_model_architecture("linformer_roberta", "linformer_roberta_large") -def linformer_roberta_large_architecture(args): - roberta_large_architecture(args) - base_architecture(args) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/README.md deleted file mode 100644 index 314984fcbb6825169193b21bd6bb3fca5fd2503b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# Self-Training with Kaldi HMM Models -This folder contains recipes for self-training on pseudo phone transcripts and -decoding into phones or words with [kaldi](https://github.com/kaldi-asr/kaldi). - -To start, download and install kaldi follow its instruction, and place this -folder in `path/to/kaldi/egs`. - -## Training -Assuming the following has been prepared: -- `w2v_dir`: contains features `{train,valid}.{npy,lengths}`, real transcripts `{train,valid}.${label}`, and dict `dict.${label}.txt` -- `lab_dir`: contains pseudo labels `{train,valid}.txt` -- `arpa_lm`: Arpa-format n-gram phone LM for decoding -- `arpa_lm_bin`: Arpa-format n-gram phone LM for unsupervised model selection to be used with KenLM - -Set these variables in `train.sh`, as well as `out_dir`, the output directory, -and then run it. - -The output will be: -``` -==== WER w.r.t. real transcript (select based on unsupervised metric) -INFO:root:./out/exp/mono/decode_valid/scoring/14.0.0.tra.txt: score 0.9178 wer 28.71% lm_ppl 24.4500 gt_wer 25.57% -INFO:root:./out/exp/tri1/decode_valid/scoring/17.1.0.tra.txt: score 0.9257 wer 26.99% lm_ppl 30.8494 gt_wer 21.90% -INFO:root:./out/exp/tri2b/decode_valid/scoring/8.0.0.tra.txt: score 0.7506 wer 23.15% lm_ppl 25.5944 gt_wer 15.78% -``` -where `wer` is the word eror rate with respect to the pseudo label, `gt_wer` to -the ground truth label, `lm_ppl` the language model perplexity of HMM prediced -transcripts, and `score` is the unsupervised metric for model selection. We -choose the model and the LM parameter of the one with the lowest score. 
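A small sketch of that selection rule, assuming the `INFO:root:` lines shown above have been collected into a list of strings (the regex, helper name, and toy data below are illustrative, not part of the recipe):

```python
import re

# Pick the decode with the lowest unsupervised `score` from log lines like the ones above.
LINE_RE = re.compile(
    r"INFO:root:(?P<tra>\S+):\s+score\s+(?P<score>[\d.]+)\s+wer\s+(?P<wer>[\d.]+)%"
    r"\s+lm_ppl\s+(?P<ppl>[\d.]+)\s+gt_wer\s+(?P<gt_wer>[\d.]+)%"
)

def best_decode(log_lines):
    candidates = []
    for line in log_lines:
        m = LINE_RE.search(line)
        if m:
            candidates.append((float(m.group("score")), m.group("tra")))
    return min(candidates) if candidates else None   # lowest score wins

if __name__ == "__main__":
    lines = [
        "INFO:root:./out/exp/mono/decode_valid/scoring/14.0.0.tra.txt: score 0.9178 wer 28.71% lm_ppl 24.4500 gt_wer 25.57%",
        "INFO:root:./out/exp/tri1/decode_valid/scoring/17.1.0.tra.txt: score 0.9257 wer 26.99% lm_ppl 30.8494 gt_wer 21.90%",
        "INFO:root:./out/exp/tri2b/decode_valid/scoring/8.0.0.tra.txt: score 0.7506 wer 23.15% lm_ppl 25.5944 gt_wer 15.78%",
    ]
    score, tra_path = best_decode(lines)
    assert "tri2b" in tra_path and abs(score - 0.7506) < 1e-9
    # the LM parameter to reuse in decode_phone.sh / decode_word_step2.sh is the
    # "8.0.0"-style component of the winning .tra.txt path
```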
In the -example above, it is `tri2b`, `8.0.0`. - - -## Decoding into Phones -In `decode_phone.sh`, set `out_dir` the same as used in `train.sh`, set -`dec_exp` and `dec_lmparam` to the selected model and LM parameter (e.g. -`tri2b` and `8.0.0` in the above example). `dec_script` needs to be set -according to `dec_exp`: for mono/tri1/tri2b, use `decode.sh`; for tri3b, use -`decode_fmllr.sh`. - -The output will be saved at `out_dir/dec_data` - - -## Decoding into Words -`decode_word_step1.sh` prepares WFSTs for word decoding. Besides the variables -mentioned above, set -- `wrd_arpa_lm`: Arpa-format n-gram word LM for decoding -- `wrd_arpa_lm_bin`: Arpa-format n-gram word LM for unsupervised model selection - -`decode_word_step1.sh` decodes the `train` and `valid` split into word and runs -unsupervised model selection using the `valid` split. The output is like: -``` -INFO:root:./out/exp/tri2b/decodeword_valid/scoring/17.0.0.tra.txt: score 1.8693 wer 24.97% lm_ppl 1785.5333 gt_wer 31.45% -``` - -After determining the LM parameter (`17.0.0` in the example above), set it in -`decode_word_step2.sh` and run it. The output will be saved at -`out_dir/dec_data_word`. diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/transformer/transformer_encoder.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/transformer/transformer_encoder.py deleted file mode 100644 index f007776a6f3b7e6731edc01d95aa24eed255d0e8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/transformer/transformer_encoder.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Dict, List, Optional - -import torch -import torch.nn as nn -from fairseq import utils -from fairseq.distributed import fsdp_wrap -from fairseq.models import FairseqEncoder -from fairseq.modules import ( - FairseqDropout, - LayerDropModuleList, - LayerNorm, - PositionalEmbedding, - SinusoidalPositionalEmbedding, -) -from fairseq.modules import transformer_layer -from fairseq.modules.checkpoint_activations import checkpoint_wrapper -from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ -from torch import Tensor -from fairseq.models.transformer import ( - TransformerConfig, -) - - -# rewrite name for backward compatibility in `make_generation_fast_` -def module_name_fordropout(module_name: str) -> str: - if module_name == 'TransformerEncoderBase': - return 'TransformerEncoder' - else: - return module_name - - -class TransformerEncoderBase(FairseqEncoder): - """ - Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer - is a :class:`TransformerEncoderLayer`. 
- - Args: - args (argparse.Namespace): parsed command-line arguments - dictionary (~fairseq.data.Dictionary): encoding dictionary - embed_tokens (torch.nn.Embedding): input embedding - """ - - def __init__(self, cfg, dictionary, embed_tokens): - self.cfg = cfg - super().__init__(dictionary) - self.register_buffer("version", torch.Tensor([3])) - - self.dropout_module = FairseqDropout( - cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) - ) - self.encoder_layerdrop = cfg.encoder.layerdrop - - embed_dim = embed_tokens.embedding_dim - self.padding_idx = embed_tokens.padding_idx - self.max_source_positions = cfg.max_source_positions - - self.embed_tokens = embed_tokens - - self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) - - self.embed_positions = ( - PositionalEmbedding( - cfg.max_source_positions, - embed_dim, - self.padding_idx, - learned=cfg.encoder.learned_pos, - ) - if not cfg.no_token_positional_embeddings - else None - ) - if cfg.layernorm_embedding: - self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) - else: - self.layernorm_embedding = None - - if not cfg.adaptive_input and cfg.quant_noise.pq > 0: - self.quant_noise = apply_quant_noise_( - nn.Linear(embed_dim, embed_dim, bias=False), - cfg.quant_noise.pq, - cfg.quant_noise.pq_block_size, - ) - else: - self.quant_noise = None - - if self.encoder_layerdrop > 0.0: - self.layers = LayerDropModuleList(p=self.encoder_layerdrop) - else: - self.layers = nn.ModuleList([]) - self.layers.extend( - [self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)] - ) - self.num_layers = len(self.layers) - - if cfg.encoder.normalize_before: - self.layer_norm = LayerNorm(embed_dim, export=cfg.export) - else: - self.layer_norm = None - - def build_encoder_layer(self, cfg): - layer = transformer_layer.TransformerEncoderLayerBase(cfg) - checkpoint = cfg.checkpoint_activations - if checkpoint: - offload_to_cpu = cfg.offload_activations - layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) - # if we are checkpointing, enforce that FSDP always wraps the - # checkpointed layer, regardless of layer size - min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 - layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) - return layer - - def forward_embedding( - self, src_tokens, token_embedding: Optional[torch.Tensor] = None - ): - # embed tokens and positions - if token_embedding is None: - token_embedding = self.embed_tokens(src_tokens) - x = embed = self.embed_scale * token_embedding - if self.embed_positions is not None: - x = embed + self.embed_positions(src_tokens) - if self.layernorm_embedding is not None: - x = self.layernorm_embedding(x) - x = self.dropout_module(x) - if self.quant_noise is not None: - x = self.quant_noise(x) - return x, embed - - def forward( - self, - src_tokens, - src_lengths: Optional[torch.Tensor] = None, - return_all_hiddens: bool = False, - token_embeddings: Optional[torch.Tensor] = None, - ): - """ - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (torch.LongTensor): lengths of each source sentence of - shape `(batch)` - return_all_hiddens (bool, optional): also return all of the - intermediate hidden states (default: False). 
- token_embeddings (torch.Tensor, optional): precomputed embeddings - default `None` will recompute embeddings - - Returns: - dict: - - **encoder_out** (Tensor): the last encoder layer's output of - shape `(src_len, batch, embed_dim)` - - **encoder_padding_mask** (ByteTensor): the positions of - padding elements of shape `(batch, src_len)` - - **encoder_embedding** (Tensor): the (scaled) embedding lookup - of shape `(batch, src_len, embed_dim)` - - **encoder_states** (List[Tensor]): all intermediate - hidden states of shape `(src_len, batch, embed_dim)`. - Only populated if *return_all_hiddens* is True. - """ - return self.forward_scriptable( - src_tokens, src_lengths, return_all_hiddens, token_embeddings - ) - - # TorchScript doesn't support super() method so that the scriptable Subclass - # can't access the base class model in Torchscript. - # Current workaround is to add a helper function with different name and - # call the helper function from scriptable Subclass. - def forward_scriptable( - self, - src_tokens, - src_lengths: Optional[torch.Tensor] = None, - return_all_hiddens: bool = False, - token_embeddings: Optional[torch.Tensor] = None, - ): - """ - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (torch.LongTensor): lengths of each source sentence of - shape `(batch)` - return_all_hiddens (bool, optional): also return all of the - intermediate hidden states (default: False). - token_embeddings (torch.Tensor, optional): precomputed embeddings - default `None` will recompute embeddings - - Returns: - dict: - - **encoder_out** (Tensor): the last encoder layer's output of - shape `(src_len, batch, embed_dim)` - - **encoder_padding_mask** (ByteTensor): the positions of - padding elements of shape `(batch, src_len)` - - **encoder_embedding** (Tensor): the (scaled) embedding lookup - of shape `(batch, src_len, embed_dim)` - - **encoder_states** (List[Tensor]): all intermediate - hidden states of shape `(src_len, batch, embed_dim)`. - Only populated if *return_all_hiddens* is True. - """ - # compute padding mask - encoder_padding_mask = src_tokens.eq(self.padding_idx) - has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any() - - x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings) - - # account for padding while computing the representation - if has_pads: - x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x)) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - encoder_states = [] - - if return_all_hiddens: - encoder_states.append(x) - - # encoder layers - for layer in self.layers: - x = layer( - x, encoder_padding_mask=encoder_padding_mask if has_pads else None - ) - if return_all_hiddens: - assert encoder_states is not None - encoder_states.append(x) - - if self.layer_norm is not None: - x = self.layer_norm(x) - - # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in - # `forward` so we use a dictionary instead. - # TorchScript does not support mixed values so the values are all lists. - # The empty list is equivalent to None. 
- src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous() - return { - "encoder_out": [x], # T x B x C - "encoder_padding_mask": [encoder_padding_mask], # B x T - "encoder_embedding": [encoder_embedding], # B x T x C - "encoder_states": encoder_states, # List[T x B x C] - "src_tokens": [], - "src_lengths": [src_lengths], - } - - @torch.jit.export - def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): - """ - Reorder encoder output according to *new_order*. - - Args: - encoder_out: output from the ``forward()`` method - new_order (LongTensor): desired order - - Returns: - *encoder_out* rearranged according to *new_order* - """ - if len(encoder_out["encoder_out"]) == 0: - new_encoder_out = [] - else: - new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] - if len(encoder_out["encoder_padding_mask"]) == 0: - new_encoder_padding_mask = [] - else: - new_encoder_padding_mask = [ - encoder_out["encoder_padding_mask"][0].index_select(0, new_order) - ] - if len(encoder_out["encoder_embedding"]) == 0: - new_encoder_embedding = [] - else: - new_encoder_embedding = [ - encoder_out["encoder_embedding"][0].index_select(0, new_order) - ] - - if len(encoder_out["src_tokens"]) == 0: - src_tokens = [] - else: - src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)] - - if len(encoder_out["src_lengths"]) == 0: - src_lengths = [] - else: - src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)] - - encoder_states = encoder_out["encoder_states"] - if len(encoder_states) > 0: - for idx, state in enumerate(encoder_states): - encoder_states[idx] = state.index_select(1, new_order) - - return { - "encoder_out": new_encoder_out, # T x B x C - "encoder_padding_mask": new_encoder_padding_mask, # B x T - "encoder_embedding": new_encoder_embedding, # B x T x C - "encoder_states": encoder_states, # List[T x B x C] - "src_tokens": src_tokens, # B x T - "src_lengths": src_lengths, # B x 1 - } - - def max_positions(self): - """Maximum input length supported by the encoder.""" - if self.embed_positions is None: - return self.max_source_positions - return min(self.max_source_positions, self.embed_positions.max_positions) - - def upgrade_state_dict_named(self, state_dict, name): - """Upgrade a (possibly old) state dict for new versions of fairseq.""" - if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): - weights_key = "{}.embed_positions.weights".format(name) - if weights_key in state_dict: - print("deleting {0}".format(weights_key)) - del state_dict[weights_key] - state_dict[ - "{}.embed_positions._float_tensor".format(name) - ] = torch.FloatTensor(1) - for i in range(self.num_layers): - # update layer norms - self.layers[i].upgrade_state_dict_named( - state_dict, "{}.layers.{}".format(name, i) - ) - - version_key = "{}.version".format(name) - if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: - # earlier checkpoints did not normalize after the stack of layers - self.layer_norm = None - self.normalize = False - state_dict[version_key] = torch.Tensor([1]) - return state_dict - - -class TransformerEncoder(TransformerEncoderBase): - def __init__(self, args, dictionary, embed_tokens): - self.args = args - super().__init__( - TransformerConfig.from_namespace(args), - dictionary, - embed_tokens, - ) - - def build_encoder_layer(self, args): - return super().build_encoder_layer( - TransformerConfig.from_namespace(args), - ) diff --git 
a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/glow.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/glow.py deleted file mode 100644 index 7a7696403d505afdf0f1606f8220801b0f46152f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/glow.py +++ /dev/null @@ -1,311 +0,0 @@ -# ***************************************************************************** -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the NVIDIA CORPORATION nor the -# names of its contributors may be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# ***************************************************************************** -import copy -import torch -from torch.autograd import Variable -import torch.nn.functional as F - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a+input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -class WaveGlowLoss(torch.nn.Module): - def __init__(self, sigma=1.0): - super(WaveGlowLoss, self).__init__() - self.sigma = sigma - - def forward(self, model_output): - z, log_s_list, log_det_W_list = model_output - for i, log_s in enumerate(log_s_list): - if i == 0: - log_s_total = torch.sum(log_s) - log_det_W_total = log_det_W_list[i] - else: - log_s_total = log_s_total + torch.sum(log_s) - log_det_W_total += log_det_W_list[i] - - loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total - return loss/(z.size(0)*z.size(1)*z.size(2)) - - -class Invertible1x1Conv(torch.nn.Module): - """ - The layer outputs both the convolution, and the log determinant - of its weight matrix. 
If reverse=True it does convolution with - inverse - """ - def __init__(self, c): - super(Invertible1x1Conv, self).__init__() - self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, - bias=False) - - # Sample a random orthonormal matrix to initialize weights - W = torch.qr(torch.FloatTensor(c, c).normal_())[0] - - # Ensure determinant is 1.0 not -1.0 - if torch.det(W) < 0: - W[:,0] = -1*W[:,0] - W = W.view(c, c, 1) - self.conv.weight.data = W - - def forward(self, z, reverse=False): - # shape - batch_size, group_size, n_of_groups = z.size() - - W = self.conv.weight.squeeze() - - if reverse: - if not hasattr(self, 'W_inverse'): - # Reverse computation - W_inverse = W.float().inverse() - W_inverse = Variable(W_inverse[..., None]) - if z.type() == 'torch.cuda.HalfTensor': - W_inverse = W_inverse.half() - self.W_inverse = W_inverse - z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0) - return z - else: - # Forward computation - log_det_W = batch_size * n_of_groups * torch.logdet(W) - z = self.conv(z) - return z, log_det_W - - -class WN(torch.nn.Module): - """ - This is the WaveNet like layer for the affine coupling. The primary difference - from WaveNet is the convolutions need not be causal. There is also no dilation - size reset. The dilation only doubles on each layer - """ - def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, - kernel_size): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - assert(n_channels % 2 == 0) - self.n_layers = n_layers - self.n_channels = n_channels - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - - start = torch.nn.Conv1d(n_in_channels, n_channels, 1) - start = torch.nn.utils.weight_norm(start, name='weight') - self.start = start - - # Initializing last layer to 0 makes the affine coupling layers - # do nothing at first. 
This helps with training stability - end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1) - end.weight.data.zero_() - end.bias.data.zero_() - self.end = end - - cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = 2 ** i - padding = int((kernel_size*dilation - dilation)/2) - in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2*n_channels - else: - res_skip_channels = n_channels - res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, forward_input): - audio, spect = forward_input - audio = self.start(audio) - output = torch.zeros_like(audio) - n_channels_tensor = torch.IntTensor([self.n_channels]) - - spect = self.cond_layer(spect) - - for i in range(self.n_layers): - spect_offset = i*2*self.n_channels - acts = fused_add_tanh_sigmoid_multiply( - self.in_layers[i](audio), - spect[:,spect_offset:spect_offset+2*self.n_channels,:], - n_channels_tensor) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - audio = audio + res_skip_acts[:,:self.n_channels,:] - output = output + res_skip_acts[:,self.n_channels:,:] - else: - output = output + res_skip_acts - - return self.end(output) - - -class WaveGlow(torch.nn.Module): - def __init__(self, n_mel_channels, n_flows, n_group, n_early_every, - n_early_size, WN_config): - super(WaveGlow, self).__init__() - - self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, - n_mel_channels, - 1024, stride=256) - assert(n_group % 2 == 0) - self.n_flows = n_flows - self.n_group = n_group - self.n_early_every = n_early_every - self.n_early_size = n_early_size - self.WN = torch.nn.ModuleList() - self.convinv = torch.nn.ModuleList() - - n_half = int(n_group/2) - - # Set up layers with the right sizes based on how many dimensions - # have been output already - n_remaining_channels = n_group - for k in range(n_flows): - if k % self.n_early_every == 0 and k > 0: - n_half = n_half - int(self.n_early_size/2) - n_remaining_channels = n_remaining_channels - self.n_early_size - self.convinv.append(Invertible1x1Conv(n_remaining_channels)) - self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config)) - self.n_remaining_channels = n_remaining_channels # Useful during inference - - def forward(self, forward_input): - """ - forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames - forward_input[1] = audio: batch x time - """ - spect, audio = forward_input - - # Upsample spectrogram to size of audio - spect = self.upsample(spect) - assert(spect.size(2) >= audio.size(1)) - if spect.size(2) > audio.size(1): - spect = spect[:, :, :audio.size(1)] - - spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) - spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1) - - audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1) - output_audio = [] - log_s_list = [] - log_det_W_list = [] - - for k in range(self.n_flows): - if k % self.n_early_every == 0 and k > 0: - output_audio.append(audio[:,:self.n_early_size,:]) - audio = audio[:,self.n_early_size:,:] - - audio, log_det_W = 
self.convinv[k](audio) - log_det_W_list.append(log_det_W) - - n_half = int(audio.size(1)/2) - audio_0 = audio[:,:n_half,:] - audio_1 = audio[:,n_half:,:] - - output = self.WN[k]((audio_0, spect)) - log_s = output[:, n_half:, :] - b = output[:, :n_half, :] - audio_1 = torch.exp(log_s)*audio_1 + b - log_s_list.append(log_s) - - audio = torch.cat([audio_0, audio_1],1) - - output_audio.append(audio) - return torch.cat(output_audio,1), log_s_list, log_det_W_list - - def infer(self, spect, sigma=1.0): - spect = self.upsample(spect) - # trim conv artifacts. maybe pad spec to kernel multiple - time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0] - spect = spect[:, :, :-time_cutoff] - - spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) - spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1) - - if spect.type() == 'torch.cuda.HalfTensor': - audio = torch.cuda.HalfTensor(spect.size(0), - self.n_remaining_channels, - spect.size(2)).normal_() - else: - audio = torch.cuda.FloatTensor(spect.size(0), - self.n_remaining_channels, - spect.size(2)).normal_() - - audio = torch.autograd.Variable(sigma*audio) - - for k in reversed(range(self.n_flows)): - n_half = int(audio.size(1)/2) - audio_0 = audio[:,:n_half,:] - audio_1 = audio[:,n_half:,:] - - output = self.WN[k]((audio_0, spect)) - - s = output[:, n_half:, :] - b = output[:, :n_half, :] - audio_1 = (audio_1 - b)/torch.exp(s) - audio = torch.cat([audio_0, audio_1],1) - - audio = self.convinv[k](audio, reverse=True) - - if k % self.n_early_every == 0 and k > 0: - if spect.type() == 'torch.cuda.HalfTensor': - z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_() - else: - z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_() - audio = torch.cat((sigma*z, audio),1) - - audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data - return audio - - @staticmethod - def remove_weightnorm(model): - waveglow = model - for WN in waveglow.WN: - WN.start = torch.nn.utils.remove_weight_norm(WN.start) - WN.in_layers = remove(WN.in_layers) - WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer) - WN.res_skip_layers = remove(WN.res_skip_layers) - return waveglow - - -def remove(conv_list): - new_conv_list = torch.nn.ModuleList() - for old_conv in conv_list: - old_conv = torch.nn.utils.remove_weight_norm(old_conv) - new_conv_list.append(old_conv) - return new_conv_list diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh deleted file mode 100644 index f3a3d3fc7cc98a38d8e9d523a0b43c0c8ea51bf9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -set -eu - -w2v_dir= # contains features `{train,valid}.{npy,lengths}`, real transcripts `{train,valid}.${label}`, and dict `dict.${label}.txt` -lab_dir= # contains pseudo labels `{train,valid}.txt` -out_dir= # output root -arpa_lm= # phone LM -arpa_lm_bin= # (binary) phone LM for KenLM, used in unsupervised selection - -label=phnc -train_name="train" -valid_name="valid" -data_dir=${out_dir}/data - -mkdir -p ${out_dir}/exp -local/prepare_lang.sh $w2v_dir/dict.${label}.txt $data_dir -local/prepare_lm.sh $arpa_lm $data_dir - -for x in $train_name $valid_name; do - x_gt=${x}_gt - - # 
prepare pseudo data - python local/prepare_data_from_w2v.py $w2v_dir $data_dir $x - steps/compute_cmvn_stats.sh $data_dir/$x $out_dir/exp/make_feat/$x $out_dir/feats/$x - python local/copy_aligned_text.py < $lab_dir/$x.txt > $data_dir/$x/text - - # prepare ground truth data - mkdir $data_dir/$x_gt - cp $data_dir/$x/{feats.scp,cmvn.scp,utt2spk,spk2utt} $data_dir/$x_gt/ - python local/copy_aligned_text.py < $w2v_dir/$x.$label > $data_dir/$x_gt/text -done - -local/train_subset_lgbeam.sh \ - --out_root ${out_dir} --out_name exp --train $train_name --valid $valid_name \ - --mono_size 2000 --tri1_size 5000 --tri2b_size -1 --tri3b_size -1 \ - --stage 1 --max_stage 3 $data_dir $data_dir/lang $data_dir/lang_test - -local/unsup_select_decode.sh \ - --split $valid_name --kenlm_path $arpa_lm_bin \ - --ref_txt $data_dir/${valid_name}_gt/text \ - --psd_txt $data_dir/${valid_name}/text \ - $out_dir/exp diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/__init__.py deleted file mode 100644 index dc9fd1886d55756b5bdfeccf1ad329bd419a706e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -"""isort:skip_file""" - -import os -import sys - -try: - from .version import __version__ # noqa -except ImportError: - version_txt = os.path.join(os.path.dirname(__file__), "version.txt") - with open(version_txt) as f: - __version__ = f.read().strip() - -__all__ = ["pdb"] - -# backwards compatibility to support `from fairseq.X import Y` -from fairseq.distributed import utils as distributed_utils -from fairseq.logging import meters, metrics, progress_bar # noqa - -sys.modules["fairseq.distributed_utils"] = distributed_utils -sys.modules["fairseq.meters"] = meters -sys.modules["fairseq.metrics"] = metrics -sys.modules["fairseq.progress_bar"] = progress_bar - -# initialize hydra -from fairseq.dataclass.initialize import hydra_init -hydra_init() - -import fairseq.criterions # noqa -import fairseq.distributed # noqa -import fairseq.models # noqa -import fairseq.modules # noqa -import fairseq.optim # noqa -import fairseq.optim.lr_scheduler # noqa -import fairseq.pdb # noqa -import fairseq.scoring # noqa -import fairseq.tasks # noqa -import fairseq.token_generation_constraints # noqa - -import fairseq.benchmark # noqa -import fairseq.model_parallel # noqa diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/vggblock.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/vggblock.py deleted file mode 100644 index ee5ee19a34816c7350c21fba7c4907fec8ca7a61..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/vggblock.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from __future__ import absolute_import, division, print_function, unicode_literals - -from collections.abc import Iterable -from itertools import repeat - -import torch -import torch.nn as nn - - -def _pair(v): - if isinstance(v, Iterable): - assert len(v) == 2, "len(v) != 2" - return v - return tuple(repeat(v, 2)) - - -def infer_conv_output_dim(conv_op, input_dim, sample_inchannel): - sample_seq_len = 200 - sample_bsz = 10 - x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim) - # N x C x H x W - # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim - x = conv_op(x) - # N x C x H x W - x = x.transpose(1, 2) - # N x H x C x W - bsz, seq = x.size()[:2] - per_channel_dim = x.size()[3] - # bsz: N, seq: H, CxW the rest - return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim - - -class VGGBlock(torch.nn.Module): - """ - VGG motibated cnn module https://arxiv.org/pdf/1409.1556.pdf - - Args: - in_channels: (int) number of input channels (typically 1) - out_channels: (int) number of output channels - conv_kernel_size: convolution channels - pooling_kernel_size: the size of the pooling window to take a max over - num_conv_layers: (int) number of convolution layers - input_dim: (int) input dimension - conv_stride: the stride of the convolving kernel. - Can be a single number or a tuple (sH, sW) Default: 1 - padding: implicit paddings on both sides of the input. - Can be a single number or a tuple (padH, padW). Default: None - layer_norm: (bool) if layer norm is going to be applied. Default: False - - Shape: - Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features) - Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features) - """ - - def __init__( - self, - in_channels, - out_channels, - conv_kernel_size, - pooling_kernel_size, - num_conv_layers, - input_dim, - conv_stride=1, - padding=None, - layer_norm=False, - ): - assert ( - input_dim is not None - ), "Need input_dim for LayerNorm and infer_conv_output_dim" - super(VGGBlock, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.conv_kernel_size = _pair(conv_kernel_size) - self.pooling_kernel_size = _pair(pooling_kernel_size) - self.num_conv_layers = num_conv_layers - self.padding = ( - tuple(e // 2 for e in self.conv_kernel_size) - if padding is None - else _pair(padding) - ) - self.conv_stride = _pair(conv_stride) - - self.layers = nn.ModuleList() - for layer in range(num_conv_layers): - conv_op = nn.Conv2d( - in_channels if layer == 0 else out_channels, - out_channels, - self.conv_kernel_size, - stride=self.conv_stride, - padding=self.padding, - ) - self.layers.append(conv_op) - if layer_norm: - conv_output_dim, per_channel_dim = infer_conv_output_dim( - conv_op, input_dim, in_channels if layer == 0 else out_channels - ) - self.layers.append(nn.LayerNorm(per_channel_dim)) - input_dim = per_channel_dim - self.layers.append(nn.ReLU()) - - if self.pooling_kernel_size is not None: - pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True) - self.layers.append(pool_op) - self.total_output_dim, self.output_dim = infer_conv_output_dim( - pool_op, input_dim, out_channels - ) - - def forward(self, x): - for i, _ in enumerate(self.layers): - x = self.layers[i](x) - return x diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/tasks/__init__.py b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/tasks/__init__.py deleted file mode 100644 index 
2d78ca98708121261aa365738a65c051b5b40626..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/tasks/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .discriminative_reranking_task import DiscriminativeRerankingNMTTask - - -__all__ = [ - "DiscriminativeRerankingNMTTask", -] diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh deleted file mode 100644 index 0428d8bef9d426ac3e664cd281ce0b688f5f580f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# -source_lang=kk_KZ -target_lang=en_XX -MODEL=criss_checkpoints/criss.3rd.pt -SPM=criss_checkpoints/sentence.bpe.model -SPLIT=test -LANG_DICT=criss_checkpoints/lang_dict.txt -ENCODER_ANALYSIS=sentence_retrieval/encoder_analysis.py -SAVE_ENCODER=save_encoder.py -ENCODER_SAVE_ROOT=sentence_embeddings/$MODEL - - - -DATA_DIR=data_tmp -INPUT_DIR=$DATA_DIR/${source_lang}-${target_lang}-tatoeba -ENCODER_SAVE_DIR=${ENCODER_SAVE_ROOT}/${source_lang}-${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${source_lang} - -# Save encoder outputs for source sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --task translation_multi_simple_epoch \ - --lang-dict ${LANG_DICT} \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${source_lang}-${target_lang} \ - -s ${source_lang} -t ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${source_lang} - -# Save encoder outputs for target sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --lang-dict ${LANG_DICT} \ - --task translation_multi_simple_epoch \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${target_lang}-${source_lang} \ - -t ${source_lang} -s ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${target_lang} - -# Analyze sentence retrieval accuracy -python $ENCODER_ANALYSIS --langs "${source_lang},${target_lang}" ${ENCODER_SAVE_DIR} diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh deleted file mode 100644 index b34c5b6e0688914a53515162f817a93617b609e5..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select_decode.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -split="dev_other" -ref_txt="" # ground truth transcript path -psd_txt="" # pseudo transcript path -get_best_wer=true -dec_name="decode" -graph_name="graph" -kenlm_path=/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o6.bin - -. ./cmd.sh -. ./path.sh -. parse_options.sh - -exp_root=$1 -unsup_args="" -if [ $# -ge 2 ]; then - unsup_args=$2 -fi - -set -eu - -if [ ! 
-z $ref_txt ] && $get_best_wer; then - echo "==== WER w.r.t. real transcript (select based on unsupervised metric)" - for x in $exp_root/*/${dec_name}_${split}*; do - lang=$(dirname $x)/$graph_name - - ( - for tra in $x/scoring/*.tra; do - cat $tra | utils/int2sym.pl -f 2- $lang/words.txt | sed 's:::g' | sed 's:::g' > $tra.txt - python local/unsup_select.py $psd_txt $tra.txt --kenlm_path $kenlm_path --gt_tra $ref_txt $unsup_args - done 2>/dev/null | grep "score=" | sed 's/=/ /g' | sed 's/;//g' | sort -k3n | head -n1 - ) & - done -fi -wait - diff --git a/spaces/OkamiFeng/Bark-with-Voice-Cloning/swap_voice.py b/spaces/OkamiFeng/Bark-with-Voice-Cloning/swap_voice.py deleted file mode 100644 index be1135be3648f9757046de1f9a4e240bd818be5a..0000000000000000000000000000000000000000 --- a/spaces/OkamiFeng/Bark-with-Voice-Cloning/swap_voice.py +++ /dev/null @@ -1,62 +0,0 @@ -from bark.generation import load_codec_model, generate_text_semantic, grab_best_device -from bark import SAMPLE_RATE -from encodec.utils import convert_audio -from bark.hubert.hubert_manager import HuBERTManager -from bark.hubert.pre_kmeans_hubert import CustomHubert -from bark.hubert.customtokenizer import CustomTokenizer -from bark.api import semantic_to_waveform -from scipy.io.wavfile import write as write_wav -from util.helper import create_filename -from util.settings import Settings - - -import torchaudio -import torch -import os -import gradio - -def swap_voice_from_audio(swap_audio_filename, selected_speaker, tokenizer_lang, seed, batchcount, progress=gradio.Progress(track_tqdm=True)): - use_gpu = not os.environ.get("BARK_FORCE_CPU", False) - progress(0, desc="Loading Codec") - - # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer - hubert_manager = HuBERTManager() - hubert_manager.make_sure_hubert_installed() - hubert_manager.make_sure_tokenizer_installed(tokenizer_lang=tokenizer_lang) - - # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer - # Load HuBERT for semantic tokens - - # Load the HuBERT model - device = grab_best_device(use_gpu) - hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device) - model = load_codec_model(use_gpu=use_gpu) - - # Load the CustomTokenizer model - tokenizer = CustomTokenizer.load_from_checkpoint(f'./models/hubert/{tokenizer_lang}_tokenizer.pth').to(device) # Automatically uses the right layers - - progress(0.25, desc="Converting WAV") - - # Load and pre-process the audio waveform - wav, sr = torchaudio.load(swap_audio_filename) - if wav.shape[0] == 2: # Stereo to mono if needed - wav = wav.mean(0, keepdim=True) - - wav = convert_audio(wav, sr, model.sample_rate, model.channels) - wav = wav.to(device) - semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate) - semantic_tokens = tokenizer.get_token(semantic_vectors) - - audio = semantic_to_waveform( - semantic_tokens, - history_prompt=selected_speaker, - temp=0.7, - silent=False, - output_full=False) - - settings = Settings('config.yaml') - - result = create_filename(settings.output_folder_path, None, "swapvoice",".wav") - write_wav(result, SAMPLE_RATE, audio) - return result - diff --git a/spaces/OlaWod/FreeVC/README.md b/spaces/OlaWod/FreeVC/README.md deleted file mode 100644 index 66ceea8c0c3a8abe147d899987aeddb88ed49a0e..0000000000000000000000000000000000000000 --- a/spaces/OlaWod/FreeVC/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: FreeVC -emoji: 🚀 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.13.0 -app_file: app.py -pinned: 
false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OlaWod/FreeVC/models.py b/spaces/OlaWod/FreeVC/models.py deleted file mode 100644 index 46b8aacb1bef18f6fad4c20c968b19125626799c..0000000000000000000000000000000000000000 --- a/spaces/OlaWod/FreeVC/models.py +++ /dev/null @@ -1,351 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in 
enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - 
y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - use_spk, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.use_spk = use_spk - - self.enc_p = Encoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if not self.use_spk: - self.enc_spk = SpeakerEncoder(model_hidden_size=gin_channels, model_embedding_size=gin_channels) - - def forward(self, c, spec, g=None, mel=None, c_lengths=None, 
spec_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if spec_lengths == None: - spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device) - - if not self.use_spk: - g = self.enc_spk(mel.transpose(1,2)) - g = g.unsqueeze(-1) - - _, m_p, logs_p, _ = self.enc_p(c, c_lengths) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - z_p = self.flow(z, spec_mask, g=g) - - z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, c, g=None, mel=None, c_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if not self.use_spk: - g = self.enc_spk.embed_utterance(mel.transpose(1,2)) - g = g.unsqueeze(-1) - - z_p, m_p, logs_p, c_mask = self.enc_p(c, c_lengths) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g) - - return o diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/optim.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/optim.py deleted file mode 100644 index d39d3aaa546c17e831d21d1758b69e8c1609415e..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/optim.py +++ /dev/null @@ -1,15 +0,0 @@ -import torch - -from detectron2.config import LazyCall as L -from detectron2.solver.build import get_default_optimizer_params - -SGD = L(torch.optim.SGD)( - params=L(get_default_optimizer_params)( - # params.model is meant to be set to the model object, before instantiating - # the optimizer. - weight_decay_norm=0.0 - ), - lr=0.02, - momentum=0.9, - weight_decay=1e-4, -) diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/merge_cells.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/merge_cells.py deleted file mode 100644 index 48ca8cc0a8aca8432835bd760c0403a3c35b34cf..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/merge_cells.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import abstractmethod - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..cnn import ConvModule - - -class BaseMergeCell(nn.Module): - """The basic class for cells used in NAS-FPN and NAS-FCOS. - - BaseMergeCell takes 2 inputs. After applying convolution - on them, they are resized to the target size. Then, - they go through binary_op, which depends on the type of cell. - If with_out_conv is True, the result of output will go through - another convolution layer. - - Args: - in_channels (int): number of input channels in out_conv layer. - out_channels (int): number of output channels in out_conv layer. - with_out_conv (bool): Whether to use out_conv layer - out_conv_cfg (dict): Config dict for convolution layer, which should - contain "groups", "kernel_size", "padding", "bias" to build - out_conv layer. - out_norm_cfg (dict): Config dict for normalization layer in out_conv. - out_conv_order (tuple): The order of conv/norm/activation layers in - out_conv. - with_input1_conv (bool): Whether to use convolution on input1. - with_input2_conv (bool): Whether to use convolution on input2. 
- input_conv_cfg (dict): Config dict for building input1_conv layer and - input2_conv layer, which is expected to contain the type of - convolution. - Default: None, which means using conv2d. - input_norm_cfg (dict): Config dict for normalization layer in - input1_conv and input2_conv layer. Default: None. - upsample_mode (str): Interpolation method used to resize the output - of input1_conv and input2_conv to target size. Currently, we - support ['nearest', 'bilinear']. Default: 'nearest'. - """ - - def __init__(self, - fused_channels=256, - out_channels=256, - with_out_conv=True, - out_conv_cfg=dict( - groups=1, kernel_size=3, padding=1, bias=True), - out_norm_cfg=None, - out_conv_order=('act', 'conv', 'norm'), - with_input1_conv=False, - with_input2_conv=False, - input_conv_cfg=None, - input_norm_cfg=None, - upsample_mode='nearest'): - super(BaseMergeCell, self).__init__() - assert upsample_mode in ['nearest', 'bilinear'] - self.with_out_conv = with_out_conv - self.with_input1_conv = with_input1_conv - self.with_input2_conv = with_input2_conv - self.upsample_mode = upsample_mode - - if self.with_out_conv: - self.out_conv = ConvModule( - fused_channels, - out_channels, - **out_conv_cfg, - norm_cfg=out_norm_cfg, - order=out_conv_order) - - self.input1_conv = self._build_input_conv( - out_channels, input_conv_cfg, - input_norm_cfg) if with_input1_conv else nn.Sequential() - self.input2_conv = self._build_input_conv( - out_channels, input_conv_cfg, - input_norm_cfg) if with_input2_conv else nn.Sequential() - - def _build_input_conv(self, channel, conv_cfg, norm_cfg): - return ConvModule( - channel, - channel, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - bias=True) - - @abstractmethod - def _binary_op(self, x1, x2): - pass - - def _resize(self, x, size): - if x.shape[-2:] == size: - return x - elif x.shape[-2:] < size: - return F.interpolate(x, size=size, mode=self.upsample_mode) - else: - assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0 - kernel_size = x.shape[-1] // size[-1] - x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size) - return x - - def forward(self, x1, x2, out_size=None): - assert x1.shape[:2] == x2.shape[:2] - assert out_size is None or len(out_size) == 2 - if out_size is None: # resize to larger one - out_size = max(x1.size()[2:], x2.size()[2:]) - - x1 = self.input1_conv(x1) - x2 = self.input2_conv(x2) - - x1 = self._resize(x1, out_size) - x2 = self._resize(x2, out_size) - - x = self._binary_op(x1, x2) - if self.with_out_conv: - x = self.out_conv(x) - return x - - -class SumCell(BaseMergeCell): - - def __init__(self, in_channels, out_channels, **kwargs): - super(SumCell, self).__init__(in_channels, out_channels, **kwargs) - - def _binary_op(self, x1, x2): - return x1 + x2 - - -class ConcatCell(BaseMergeCell): - - def __init__(self, in_channels, out_channels, **kwargs): - super(ConcatCell, self).__init__(in_channels * 2, out_channels, - **kwargs) - - def _binary_op(self, x1, x2): - ret = torch.cat([x1, x2], dim=1) - return ret - - -class GlobalPoolingCell(BaseMergeCell): - - def __init__(self, in_channels=None, out_channels=None, **kwargs): - super().__init__(in_channels, out_channels, **kwargs) - self.global_pool = nn.AdaptiveAvgPool2d((1, 1)) - - def _binary_op(self, x1, x2): - x2_att = self.global_pool(x2).sigmoid() - return x2 + x2_att * x1 diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify_model.py b/spaces/PKUWilliamYang/VToonify/vtoonify_model.py deleted file mode 100644 index 
8a506c2da195acafa2e6a18b3ef0874a58b5b15f..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify_model.py +++ /dev/null @@ -1,284 +0,0 @@ -from __future__ import annotations -import gradio as gr -import pathlib -import sys -sys.path.insert(0, 'vtoonify') - -from util import load_psp_standalone, get_video_crop_parameter, tensor2cv2 -import torch -import torch.nn as nn -import numpy as np -import dlib -import cv2 -from model.vtoonify import VToonify -from model.bisenet.model import BiSeNet -import torch.nn.functional as F -from torchvision import transforms -from model.encoder.align_all_parallel import align_face -import gc -import huggingface_hub -import os - -MODEL_REPO = 'PKUWilliamYang/VToonify' - -class Model(): - def __init__(self, device): - super().__init__() - - self.device = device - self.style_types = { - 'cartoon1': ['vtoonify_d_cartoon/vtoonify_s026_d0.5.pt', 26], - 'cartoon1-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 26], - 'cartoon2-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 64], - 'cartoon3-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 153], - 'cartoon4': ['vtoonify_d_cartoon/vtoonify_s299_d0.5.pt', 299], - 'cartoon4-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 299], - 'cartoon5-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 8], - 'comic1-d': ['vtoonify_d_comic/vtoonify_s_d.pt', 28], - 'comic2-d': ['vtoonify_d_comic/vtoonify_s_d.pt', 18], - 'arcane1': ['vtoonify_d_arcane/vtoonify_s000_d0.5.pt', 0], - 'arcane1-d': ['vtoonify_d_arcane/vtoonify_s_d.pt', 0], - 'arcane2': ['vtoonify_d_arcane/vtoonify_s077_d0.5.pt', 77], - 'arcane2-d': ['vtoonify_d_arcane/vtoonify_s_d.pt', 77], - 'caricature1': ['vtoonify_d_caricature/vtoonify_s039_d0.5.pt', 39], - 'caricature2': ['vtoonify_d_caricature/vtoonify_s068_d0.5.pt', 68], - 'pixar': ['vtoonify_d_pixar/vtoonify_s052_d0.5.pt', 52], - 'pixar-d': ['vtoonify_d_pixar/vtoonify_s_d.pt', 52], - 'illustration1-d': ['vtoonify_d_illustration/vtoonify_s054_d_c.pt', 54], - 'illustration2-d': ['vtoonify_d_illustration/vtoonify_s004_d_c.pt', 4], - 'illustration3-d': ['vtoonify_d_illustration/vtoonify_s009_d_c.pt', 9], - 'illustration4-d': ['vtoonify_d_illustration/vtoonify_s043_d_c.pt', 43], - 'illustration5-d': ['vtoonify_d_illustration/vtoonify_s086_d_c.pt', 86], - } - - self.landmarkpredictor = self._create_dlib_landmark_model() - self.parsingpredictor = self._create_parsing_model() - self.pspencoder = self._load_encoder() - self.transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), - ]) - - self.vtoonify, self.exstyle = self._load_default_model() - self.color_transfer = False - self.style_name = 'cartoon1' - self.video_limit_cpu = 100 - self.video_limit_gpu = 300 - - @staticmethod - def _create_dlib_landmark_model(): - return dlib.shape_predictor(huggingface_hub.hf_hub_download(MODEL_REPO, - 'models/shape_predictor_68_face_landmarks.dat')) - - def _create_parsing_model(self): - parsingpredictor = BiSeNet(n_classes=19) - parsingpredictor.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO, 'models/faceparsing.pth'), - map_location=lambda storage, loc: storage)) - parsingpredictor.to(self.device).eval() - return parsingpredictor - - def _load_encoder(self) -> nn.Module: - style_encoder_path = huggingface_hub.hf_hub_download(MODEL_REPO,'models/encoder.pt') - return load_psp_standalone(style_encoder_path, self.device) - - def _load_default_model(self) -> tuple[torch.Tensor, str]: - vtoonify = VToonify(backbone = 'dualstylegan') - 
vtoonify.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO, - 'models/vtoonify_d_cartoon/vtoonify_s026_d0.5.pt'), - map_location=lambda storage, loc: storage)['g_ema']) - vtoonify.to(self.device) - tmp = np.load(huggingface_hub.hf_hub_download(MODEL_REPO,'models/vtoonify_d_cartoon/exstyle_code.npy'), allow_pickle=True).item() - exstyle = torch.tensor(tmp[list(tmp.keys())[26]]).to(self.device) - with torch.no_grad(): - exstyle = vtoonify.zplus2wplus(exstyle) - return vtoonify, exstyle - - def load_model(self, style_type: str) -> tuple[torch.Tensor, str]: - if 'illustration' in style_type: - self.color_transfer = True - else: - self.color_transfer = False - if style_type not in self.style_types.keys(): - return None, 'Oops, wrong Style Type. Please select a valid model.' - self.style_name = style_type - model_path, ind = self.style_types[style_type] - style_path = os.path.join('models',os.path.dirname(model_path),'exstyle_code.npy') - self.vtoonify.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO,'models/'+model_path), - map_location=lambda storage, loc: storage)['g_ema']) - tmp = np.load(huggingface_hub.hf_hub_download(MODEL_REPO, style_path), allow_pickle=True).item() - exstyle = torch.tensor(tmp[list(tmp.keys())[ind]]).to(self.device) - with torch.no_grad(): - exstyle = self.vtoonify.zplus2wplus(exstyle) - return exstyle, 'Model of %s loaded.'%(style_type) - - def detect_and_align(self, frame, top, bottom, left, right, return_para=False): - message = 'Error: no face detected! Please retry or change the photo.' - paras = get_video_crop_parameter(frame, self.landmarkpredictor, [left, right, top, bottom]) - instyle = None - h, w, scale = 0, 0, 0 - if paras is not None: - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - # for HR image, we apply gaussian blur to it to avoid over-sharp stylization results - kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]]) - if scale <= 0.75: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - if scale <= 0.375: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - with torch.no_grad(): - I = align_face(frame, self.landmarkpredictor) - if I is not None: - I = self.transform(I).unsqueeze(dim=0).to(self.device) - instyle = self.pspencoder(I) - instyle = self.vtoonify.zplus2wplus(instyle) - message = 'Successfully rescale the frame to (%d, %d)'%(bottom-top, right-left) - else: - frame = np.zeros((256,256,3), np.uint8) - else: - frame = np.zeros((256,256,3), np.uint8) - if return_para: - return frame, instyle, message, w, h, top, bottom, left, right, scale - return frame, instyle, message - - #@torch.inference_mode() - def detect_and_align_image(self, image: str, top: int, bottom: int, left: int, right: int - ) -> tuple[np.ndarray, torch.Tensor, str]: - if image is None: - return np.zeros((256,256,3), np.uint8), None, 'Error: fail to load empty file.' - frame = cv2.imread(image) - if frame is None: - return np.zeros((256,256,3), np.uint8), None, 'Error: fail to load the image.' - frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - return self.detect_and_align(frame, top, bottom, left, right) - - def detect_and_align_video(self, video: str, top: int, bottom: int, left: int, right: int - ) -> tuple[np.ndarray, torch.Tensor, str]: - if video is None: - return np.zeros((256,256,3), np.uint8), None, 'Error: fail to load empty file.' 
- video_cap = cv2.VideoCapture(video) - if video_cap.get(7) == 0: - video_cap.release() - return np.zeros((256,256,3), np.uint8), torch.zeros(1,18,512).to(self.device), 'Error: fail to load the video.' - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - video_cap.release() - return self.detect_and_align(frame, top, bottom, left, right) - - def detect_and_align_full_video(self, video: str, top: int, bottom: int, left: int, right: int) -> tuple[str, torch.Tensor, str]: - message = 'Error: no face detected! Please retry or change the video.' - instyle = None - if video is None: - return 'default.mp4', instyle, 'Error: fail to load empty file.' - video_cap = cv2.VideoCapture(video) - if video_cap.get(7) == 0: - video_cap.release() - return 'default.mp4', instyle, 'Error: fail to load the video.' - num = min(self.video_limit_gpu, int(video_cap.get(7))) - if self.device == 'cpu': - num = min(self.video_limit_cpu, num) - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame, instyle, message, w, h, top, bottom, left, right, scale = self.detect_and_align(frame, top, bottom, left, right, True) - if instyle is None: - return 'default.mp4', instyle, message - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter('input.mp4', fourcc, video_cap.get(5), (int(right-left), int(bottom-top))) - videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) - kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]]) - for i in range(num-1): - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - if scale <= 0.75: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - if scale <= 0.375: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) - - videoWriter.release() - video_cap.release() - - return 'input.mp4', instyle, 'Successfully rescale the video to (%d, %d)'%(bottom-top, right-left) - - def image_toonify(self, aligned_face: np.ndarray, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[np.ndarray, str]: - #print(style_type + ' ' + self.style_name) - if instyle is None or aligned_face is None: - return np.zeros((256,256,3), np.uint8), 'Opps, something wrong with the input. Please go to Step 2 and Rescale Image/First Frame again.' - if self.style_name != style_type: - exstyle, _ = self.load_model(style_type) - if exstyle is None: - return np.zeros((256,256,3), np.uint8), 'Opps, something wrong with the style type. Please go to Step 1 and load model again.' 
- with torch.no_grad(): - if self.color_transfer: - s_w = exstyle - else: - s_w = instyle.clone() - s_w[:,:7] = exstyle[:,:7] - - x = self.transform(aligned_face).unsqueeze(dim=0).to(self.device) - x_p = F.interpolate(self.parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], - scale_factor=0.5, recompute_scale_factor=False).detach() - inputs = torch.cat((x, x_p/16.), dim=1) - y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = style_degree) - y_tilde = torch.clamp(y_tilde, -1, 1) - print('*** Toonify %dx%d image with style of %s'%(y_tilde.shape[2], y_tilde.shape[3], style_type)) - return ((y_tilde[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8), 'Successfully toonify the image with style of %s'%(self.style_name) - - def video_tooniy(self, aligned_video: str, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[str, str]: - #print(style_type + ' ' + self.style_name) - if aligned_video is None: - return 'default.mp4', 'Opps, something wrong with the input. Please go to Step 2 and Rescale Video again.' - video_cap = cv2.VideoCapture(aligned_video) - if instyle is None or aligned_video is None or video_cap.get(7) == 0: - video_cap.release() - return 'default.mp4', 'Opps, something wrong with the input. Please go to Step 2 and Rescale Video again.' - if self.style_name != style_type: - exstyle, _ = self.load_model(style_type) - if exstyle is None: - return 'default.mp4', 'Opps, something wrong with the style type. Please go to Step 1 and load model again.' - num = min(self.video_limit_gpu, int(video_cap.get(7))) - if self.device == 'cpu': - num = min(self.video_limit_cpu, num) - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter('output.mp4', fourcc, - video_cap.get(5), (int(video_cap.get(3)*4), - int(video_cap.get(4)*4))) - - batch_frames = [] - if video_cap.get(3) != 0: - if self.device == 'cpu': - batch_size = max(1, int(4 * 256* 256/ video_cap.get(3) / video_cap.get(4))) - else: - batch_size = min(max(1, int(4 * 400 * 360/ video_cap.get(3) / video_cap.get(4))), 4) - else: - batch_size = 1 - print('*** Toonify using batch size of %d on %dx%d video of %d frames with style of %s'%(batch_size, int(video_cap.get(3)*4), int(video_cap.get(4)*4), num, style_type)) - with torch.no_grad(): - if self.color_transfer: - s_w = exstyle - else: - s_w = instyle.clone() - s_w[:,:7] = exstyle[:,:7] - for i in range(num): - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - batch_frames += [self.transform(frame).unsqueeze(dim=0).to(self.device)] - if len(batch_frames) == batch_size or (i+1) == num: - x = torch.cat(batch_frames, dim=0) - batch_frames = [] - with torch.no_grad(): - x_p = F.interpolate(self.parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], - scale_factor=0.5, recompute_scale_factor=False).detach() - inputs = torch.cat((x, x_p/16.), dim=1) - y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), style_degree) - y_tilde = torch.clamp(y_tilde, -1, 1) - for k in range(y_tilde.size(0)): - videoWriter.write(tensor2cv2(y_tilde[k].cpu())) - gc.collect() - - videoWriter.release() - video_cap.release() - return 'output.mp4', 'Successfully toonify video of %d frames with style of %s'%(num, self.style_name) - - diff --git a/spaces/PSLD/PSLD/stable-diffusion/ldm/data/lsun.py b/spaces/PSLD/PSLD/stable-diffusion/ldm/data/lsun.py deleted file mode 100644 index 
6256e45715ff0b57c53f985594d27cbbbff0e68e..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/ldm/data/lsun.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import numpy as np -import PIL -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms - - -class LSUNBase(Dataset): - def __init__(self, - txt_file, - data_root, - size=None, - interpolation="bicubic", - flip_p=0.5 - ): - self.data_paths = txt_file - self.data_root = data_root - with open(self.data_paths, "r") as f: - self.image_paths = f.read().splitlines() - self._length = len(self.image_paths) - self.labels = { - "relative_file_path_": [l for l in self.image_paths], - "file_path_": [os.path.join(self.data_root, l) - for l in self.image_paths], - } - - self.size = size - self.interpolation = {"linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - }[interpolation] - self.flip = transforms.RandomHorizontalFlip(p=flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = dict((k, self.labels[k][i]) for k in self.labels) - image = Image.open(example["file_path_"]) - if not image.mode == "RGB": - image = image.convert("RGB") - - # default to score-sde preprocessing - img = np.array(image).astype(np.uint8) - crop = min(img.shape[0], img.shape[1]) - h, w, = img.shape[0], img.shape[1] - img = img[(h - crop) // 2:(h + crop) // 2, - (w - crop) // 2:(w + crop) // 2] - - image = Image.fromarray(img) - if self.size is not None: - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip(image) - image = np.array(image).astype(np.uint8) - example["image"] = (image / 127.5 - 1.0).astype(np.float32) - return example - - -class LSUNChurchesTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) - - -class LSUNChurchesValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", - flip_p=flip_p, **kwargs) - - -class LSUNBedroomsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) - - -class LSUNBedroomsValidation(LSUNBase): - def __init__(self, flip_p=0.0, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", - flip_p=flip_p, **kwargs) - - -class LSUNCatsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) - - -class LSUNCatsValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", - flip_p=flip_p, **kwargs) diff --git a/spaces/PSLD/PSLD/stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py b/spaces/PSLD/PSLD/stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py deleted file mode 100644 index 2c42d6f964d92658e769df95a81dec92250e5a99..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py +++ /dev/null @@ -1,82 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch - -from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver - - -class DPMSolverSampler(object): - def __init__(self, model, **kwargs): - super().__init__() - self.model = model - 
to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type="noise", - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) - - return x.to(device), None diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/texinfo/indexing.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/texinfo/indexing.go deleted file mode 100644 index 66883122d6d1d8e55e1dbb9ebcbe9288dcbf5af8..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/texinfo/indexing.go and /dev/null differ diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/ema_head.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/ema_head.py deleted file mode 100644 index 12267cb40569d2b5a4a2955a6dc2671377ff5e0a..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/ema_head.py +++ /dev/null @@ -1,168 +0,0 @@ -import math - -import torch -import torch.distributed as dist -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -def reduce_mean(tensor): - """Reduce mean when distributed training.""" - if not (dist.is_available() and dist.is_initialized()): - return tensor - tensor = tensor.clone() - dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) - return tensor - - -class EMAModule(nn.Module): - """Expectation Maximization Attention Module used in EMANet. 
- - Args: - channels (int): Channels of the whole module. - num_bases (int): Number of bases. - num_stages (int): Number of the EM iterations. - """ - - def __init__(self, channels, num_bases, num_stages, momentum): - super(EMAModule, self).__init__() - assert num_stages >= 1, 'num_stages must be at least 1!' - self.num_bases = num_bases - self.num_stages = num_stages - self.momentum = momentum - - bases = torch.zeros(1, channels, self.num_bases) - bases.normal_(0, math.sqrt(2. / self.num_bases)) - # [1, channels, num_bases] - bases = F.normalize(bases, dim=1, p=2) - self.register_buffer('bases', bases) - - def forward(self, feats): - """Forward function.""" - batch_size, channels, height, width = feats.size() - # [batch_size, channels, height*width] - feats = feats.view(batch_size, channels, height * width) - # [batch_size, channels, num_bases] - bases = self.bases.repeat(batch_size, 1, 1) - - with torch.no_grad(): - for i in range(self.num_stages): - # [batch_size, height*width, num_bases] - attention = torch.einsum('bcn,bck->bnk', feats, bases) - attention = F.softmax(attention, dim=2) - # l1 norm - attention_normed = F.normalize(attention, dim=1, p=1) - # [batch_size, channels, num_bases] - bases = torch.einsum('bcn,bnk->bck', feats, attention_normed) - # l2 norm - bases = F.normalize(bases, dim=1, p=2) - - feats_recon = torch.einsum('bck,bnk->bcn', bases, attention) - feats_recon = feats_recon.view(batch_size, channels, height, width) - - if self.training: - bases = bases.mean(dim=0, keepdim=True) - bases = reduce_mean(bases) - # l2 norm - bases = F.normalize(bases, dim=1, p=2) - self.bases = (1 - - self.momentum) * self.bases + self.momentum * bases - - return feats_recon - - -@HEADS.register_module() -class EMAHead(BaseDecodeHead): - """Expectation Maximization Attention Networks for Semantic Segmentation. - - This head is the implementation of `EMANet - `_. - - Args: - ema_channels (int): EMA module channels - num_bases (int): Number of bases. - num_stages (int): Number of the EM iterations. - concat_input (bool): Whether concat the input and output of convs - before classification layer. Default: True - momentum (float): Momentum to update the base. Default: 0.1. 
- """ - - def __init__(self, - ema_channels, - num_bases, - num_stages, - concat_input=True, - momentum=0.1, - **kwargs): - super(EMAHead, self).__init__(**kwargs) - self.ema_channels = ema_channels - self.num_bases = num_bases - self.num_stages = num_stages - self.concat_input = concat_input - self.momentum = momentum - self.ema_module = EMAModule(self.ema_channels, self.num_bases, - self.num_stages, self.momentum) - - self.ema_in_conv = ConvModule( - self.in_channels, - self.ema_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - # project (0, inf) -> (-inf, inf) - self.ema_mid_conv = ConvModule( - self.ema_channels, - self.ema_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=None, - act_cfg=None) - for param in self.ema_mid_conv.parameters(): - param.requires_grad = False - - self.ema_out_conv = ConvModule( - self.ema_channels, - self.ema_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=None) - self.bottleneck = ConvModule( - self.ema_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - if self.concat_input: - self.conv_cat = ConvModule( - self.in_channels + self.channels, - self.channels, - kernel_size=3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - feats = self.ema_in_conv(x) - identity = feats - feats = self.ema_mid_conv(feats) - recon = self.ema_module(feats) - recon = F.relu(recon, inplace=True) - recon = self.ema_out_conv(recon) - output = F.relu(identity + recon, inplace=True) - output = self.bottleneck(output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/models/test_multibanddiffusion.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/models/test_multibanddiffusion.py deleted file mode 100644 index 2702a3cb5fe402bf96911dbc992d2749cb18a4c0..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/models/test_multibanddiffusion.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import random - -import numpy as np -import torch -from audiocraft.models.multibanddiffusion import MultiBandDiffusion, DiffusionProcess -from audiocraft.models import EncodecModel, DiffusionUnet -from audiocraft.modules import SEANetEncoder, SEANetDecoder -from audiocraft.modules.diffusion_schedule import NoiseSchedule -from audiocraft.quantization import DummyQuantizer - - -class TestMBD: - - def _create_mbd(self, - sample_rate: int, - channels: int, - n_filters: int = 3, - n_residual_layers: int = 1, - ratios: list = [5, 4, 3, 2], - num_steps: int = 1000, - codec_dim: int = 128, - **kwargs): - frame_rate = np.prod(ratios) - encoder = SEANetEncoder(channels=channels, dimension=codec_dim, n_filters=n_filters, - n_residual_layers=n_residual_layers, ratios=ratios) - decoder = SEANetDecoder(channels=channels, dimension=codec_dim, n_filters=n_filters, - n_residual_layers=n_residual_layers, ratios=ratios) - quantizer = DummyQuantizer() - compression_model = EncodecModel(encoder, decoder, quantizer, frame_rate=frame_rate, - sample_rate=sample_rate, channels=channels, **kwargs) - diffusion_model = DiffusionUnet(chin=channels, num_steps=num_steps, codec_dim=codec_dim) - schedule = NoiseSchedule(device='cpu', num_steps=num_steps) - DP = DiffusionProcess(model=diffusion_model, noise_schedule=schedule) - mbd = MultiBandDiffusion(DPs=[DP], codec_model=compression_model) - return mbd - - def test_model(self): - random.seed(1234) - sample_rate = 24_000 - channels = 1 - codec_dim = 128 - mbd = self._create_mbd(sample_rate=sample_rate, channels=channels, codec_dim=codec_dim) - for _ in range(10): - length = random.randrange(1, 10_000) - x = torch.randn(2, channels, length) - res = mbd.regenerate(x, sample_rate) - assert res.shape == x.shape diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/req/constructors.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/req/constructors.py deleted file mode 100644 index dea7c3b0116267a62f2b79af8be2e19d72eb2d96..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/req/constructors.py +++ /dev/null @@ -1,501 +0,0 @@ -"""Backing implementation for InstallRequirement's various constructors - -The idea here is that these formed a major chunk of InstallRequirement's size -so, moving them and support code dedicated to them outside of that class -helps creates for better understandability for the rest of the code. - -These are meant to be used elsewhere within pip to create instances of -InstallRequirement. 
-""" - -import logging -import os -import re -from typing import Any, Dict, Optional, Set, Tuple, Union - -from pip._vendor.packaging.markers import Marker -from pip._vendor.packaging.requirements import InvalidRequirement, Requirement -from pip._vendor.packaging.specifiers import Specifier - -from pip._internal.exceptions import InstallationError -from pip._internal.models.index import PyPI, TestPyPI -from pip._internal.models.link import Link -from pip._internal.models.wheel import Wheel -from pip._internal.req.req_file import ParsedRequirement -from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.filetypes import is_archive_file -from pip._internal.utils.misc import is_installable_dir -from pip._internal.utils.packaging import get_requirement -from pip._internal.utils.urls import path_to_url -from pip._internal.vcs import is_url, vcs - -__all__ = [ - "install_req_from_editable", - "install_req_from_line", - "parse_editable", -] - -logger = logging.getLogger(__name__) -operators = Specifier._operators.keys() - - -def _strip_extras(path: str) -> Tuple[str, Optional[str]]: - m = re.match(r"^(.+)(\[[^\]]+\])$", path) - extras = None - if m: - path_no_extras = m.group(1) - extras = m.group(2) - else: - path_no_extras = path - - return path_no_extras, extras - - -def convert_extras(extras: Optional[str]) -> Set[str]: - if not extras: - return set() - return get_requirement("placeholder" + extras.lower()).extras - - -def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: - """Parses an editable requirement into: - - a requirement name - - an URL - - extras - - editable options - Accepted requirements: - svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir - .[some_extra] - """ - - url = editable_req - - # If a file path is specified with extras, strip off the extras. - url_no_extras, extras = _strip_extras(url) - - if os.path.isdir(url_no_extras): - # Treating it as code that has already been checked out - url_no_extras = path_to_url(url_no_extras) - - if url_no_extras.lower().startswith("file:"): - package_name = Link(url_no_extras).egg_fragment - if extras: - return ( - package_name, - url_no_extras, - get_requirement("placeholder" + extras.lower()).extras, - ) - else: - return package_name, url_no_extras, set() - - for version_control in vcs: - if url.lower().startswith(f"{version_control}:"): - url = f"{version_control}+{url}" - break - - link = Link(url) - - if not link.is_vcs: - backends = ", ".join(vcs.all_schemes) - raise InstallationError( - f"{editable_req} is not a valid editable requirement. " - f"It should either be a path to a local project or a VCS URL " - f"(beginning with {backends})." - ) - - package_name = link.egg_fragment - if not package_name: - raise InstallationError( - "Could not detect requirement name for '{}', please specify one " - "with #egg=your_package_name".format(editable_req) - ) - return package_name, url, set() - - -def check_first_requirement_in_file(filename: str) -> None: - """Check if file is parsable as a requirements file. - - This is heavily based on ``pkg_resources.parse_requirements``, but - simplified to just check the first meaningful line. - - :raises InvalidRequirement: If the first meaningful line cannot be parsed - as an requirement. - """ - with open(filename, encoding="utf-8", errors="ignore") as f: - # Create a steppable iterator, so we can handle \-continuations. 
- lines = ( - line - for line in (line.strip() for line in f) - if line and not line.startswith("#") # Skip blank lines/comments. - ) - - for line in lines: - # Drop comments -- a hash without a space may be in a URL. - if " #" in line: - line = line[: line.find(" #")] - # If there is a line continuation, drop it, and append the next line. - if line.endswith("\\"): - line = line[:-2].strip() + next(lines, "") - Requirement(line) - return - - -def deduce_helpful_msg(req: str) -> str: - """Returns helpful msg in case requirements file does not exist, - or cannot be parsed. - - :params req: Requirements file path - """ - if not os.path.exists(req): - return f" File '{req}' does not exist." - msg = " The path does exist. " - # Try to parse and check if it is a requirements file. - try: - check_first_requirement_in_file(req) - except InvalidRequirement: - logger.debug("Cannot parse '%s' as requirements file", req) - else: - msg += ( - f"The argument you provided " - f"({req}) appears to be a" - f" requirements file. If that is the" - f" case, use the '-r' flag to install" - f" the packages specified within it." - ) - return msg - - -class RequirementParts: - def __init__( - self, - requirement: Optional[Requirement], - link: Optional[Link], - markers: Optional[Marker], - extras: Set[str], - ): - self.requirement = requirement - self.link = link - self.markers = markers - self.extras = extras - - -def parse_req_from_editable(editable_req: str) -> RequirementParts: - name, url, extras_override = parse_editable(editable_req) - - if name is not None: - try: - req: Optional[Requirement] = Requirement(name) - except InvalidRequirement: - raise InstallationError(f"Invalid requirement: '{name}'") - else: - req = None - - link = Link(url) - - return RequirementParts(req, link, None, extras_override) - - -# ---- The actual constructors follow ---- - - -def install_req_from_editable( - editable_req: str, - comes_from: Optional[Union[InstallRequirement, str]] = None, - use_pep517: Optional[bool] = None, - isolated: bool = False, - options: Optional[Dict[str, Any]] = None, - constraint: bool = False, - user_supplied: bool = False, - permit_editable_wheels: bool = False, - config_settings: Optional[Dict[str, str]] = None, -) -> InstallRequirement: - - parts = parse_req_from_editable(editable_req) - - return InstallRequirement( - parts.requirement, - comes_from=comes_from, - user_supplied=user_supplied, - editable=True, - permit_editable_wheels=permit_editable_wheels, - link=parts.link, - constraint=constraint, - use_pep517=use_pep517, - isolated=isolated, - install_options=options.get("install_options", []) if options else [], - global_options=options.get("global_options", []) if options else [], - hash_options=options.get("hashes", {}) if options else {}, - config_settings=config_settings, - extras=parts.extras, - ) - - -def _looks_like_path(name: str) -> bool: - """Checks whether the string "looks like" a path on the filesystem. - - This does not check whether the target actually exists, only judge from the - appearance. - - Returns true if any of the following conditions is true: - * a path separator is found (either os.path.sep or os.path.altsep); - * a dot is found (which represents the current directory). 
- """ - if os.path.sep in name: - return True - if os.path.altsep is not None and os.path.altsep in name: - return True - if name.startswith("."): - return True - return False - - -def _get_url_from_path(path: str, name: str) -> Optional[str]: - """ - First, it checks whether a provided path is an installable directory. If it - is, returns the path. - - If false, check if the path is an archive file (such as a .whl). - The function checks if the path is a file. If false, if the path has - an @, it will treat it as a PEP 440 URL requirement and return the path. - """ - if _looks_like_path(name) and os.path.isdir(path): - if is_installable_dir(path): - return path_to_url(path) - # TODO: The is_installable_dir test here might not be necessary - # now that it is done in load_pyproject_toml too. - raise InstallationError( - f"Directory {name!r} is not installable. Neither 'setup.py' " - "nor 'pyproject.toml' found." - ) - if not is_archive_file(path): - return None - if os.path.isfile(path): - return path_to_url(path) - urlreq_parts = name.split("@", 1) - if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): - # If the path contains '@' and the part before it does not look - # like a path, try to treat it as a PEP 440 URL req instead. - return None - logger.warning( - "Requirement %r looks like a filename, but the file does not exist", - name, - ) - return path_to_url(path) - - -def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts: - if is_url(name): - marker_sep = "; " - else: - marker_sep = ";" - if marker_sep in name: - name, markers_as_string = name.split(marker_sep, 1) - markers_as_string = markers_as_string.strip() - if not markers_as_string: - markers = None - else: - markers = Marker(markers_as_string) - else: - markers = None - name = name.strip() - req_as_string = None - path = os.path.normpath(os.path.abspath(name)) - link = None - extras_as_string = None - - if is_url(name): - link = Link(name) - else: - p, extras_as_string = _strip_extras(path) - url = _get_url_from_path(p, name) - if url is not None: - link = Link(url) - - # it's a local file, dir, or url - if link: - # Handle relative file URLs - if link.scheme == "file" and re.search(r"\.\./", link.url): - link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path)))) - # wheel file - if link.is_wheel: - wheel = Wheel(link.filename) # can raise InvalidWheelFilename - req_as_string = f"{wheel.name}=={wheel.version}" - else: - # set the req to the egg fragment. when it's not there, this - # will become an 'unnamed' requirement - req_as_string = link.egg_fragment - - # a requirement specifier - else: - req_as_string = name - - extras = convert_extras(extras_as_string) - - def with_source(text: str) -> str: - if not line_source: - return text - return f"{text} (from {line_source})" - - def _parse_req_string(req_as_string: str) -> Requirement: - try: - req = get_requirement(req_as_string) - except InvalidRequirement: - if os.path.sep in req_as_string: - add_msg = "It looks like a path." - add_msg += deduce_helpful_msg(req_as_string) - elif "=" in req_as_string and not any( - op in req_as_string for op in operators - ): - add_msg = "= is not a valid operator. Did you mean == ?" 
- else: - add_msg = "" - msg = with_source(f"Invalid requirement: {req_as_string!r}") - if add_msg: - msg += f"\nHint: {add_msg}" - raise InstallationError(msg) - else: - # Deprecate extras after specifiers: "name>=1.0[extras]" - # This currently works by accident because _strip_extras() parses - # any extras in the end of the string and those are saved in - # RequirementParts - for spec in req.specifier: - spec_str = str(spec) - if spec_str.endswith("]"): - msg = f"Extras after version '{spec_str}'." - raise InstallationError(msg) - return req - - if req_as_string is not None: - req: Optional[Requirement] = _parse_req_string(req_as_string) - else: - req = None - - return RequirementParts(req, link, markers, extras) - - -def install_req_from_line( - name: str, - comes_from: Optional[Union[str, InstallRequirement]] = None, - use_pep517: Optional[bool] = None, - isolated: bool = False, - options: Optional[Dict[str, Any]] = None, - constraint: bool = False, - line_source: Optional[str] = None, - user_supplied: bool = False, - config_settings: Optional[Dict[str, str]] = None, -) -> InstallRequirement: - """Creates an InstallRequirement from a name, which might be a - requirement, directory containing 'setup.py', filename, or URL. - - :param line_source: An optional string describing where the line is from, - for logging purposes in case of an error. - """ - parts = parse_req_from_line(name, line_source) - - return InstallRequirement( - parts.requirement, - comes_from, - link=parts.link, - markers=parts.markers, - use_pep517=use_pep517, - isolated=isolated, - install_options=options.get("install_options", []) if options else [], - global_options=options.get("global_options", []) if options else [], - hash_options=options.get("hashes", {}) if options else {}, - config_settings=config_settings, - constraint=constraint, - extras=parts.extras, - user_supplied=user_supplied, - ) - - -def install_req_from_req_string( - req_string: str, - comes_from: Optional[InstallRequirement] = None, - isolated: bool = False, - use_pep517: Optional[bool] = None, - user_supplied: bool = False, - config_settings: Optional[Dict[str, str]] = None, -) -> InstallRequirement: - try: - req = get_requirement(req_string) - except InvalidRequirement: - raise InstallationError(f"Invalid requirement: '{req_string}'") - - domains_not_allowed = [ - PyPI.file_storage_domain, - TestPyPI.file_storage_domain, - ] - if ( - req.url - and comes_from - and comes_from.link - and comes_from.link.netloc in domains_not_allowed - ): - # Explicitly disallow pypi packages that depend on external urls - raise InstallationError( - "Packages installed from PyPI cannot depend on packages " - "which are not also hosted on PyPI.\n" - "{} depends on {} ".format(comes_from.name, req) - ) - - return InstallRequirement( - req, - comes_from, - isolated=isolated, - use_pep517=use_pep517, - user_supplied=user_supplied, - config_settings=config_settings, - ) - - -def install_req_from_parsed_requirement( - parsed_req: ParsedRequirement, - isolated: bool = False, - use_pep517: Optional[bool] = None, - user_supplied: bool = False, - config_settings: Optional[Dict[str, str]] = None, -) -> InstallRequirement: - if parsed_req.is_editable: - req = install_req_from_editable( - parsed_req.requirement, - comes_from=parsed_req.comes_from, - use_pep517=use_pep517, - constraint=parsed_req.constraint, - isolated=isolated, - user_supplied=user_supplied, - config_settings=config_settings, - ) - - else: - req = install_req_from_line( - parsed_req.requirement, - 
comes_from=parsed_req.comes_from, - use_pep517=use_pep517, - isolated=isolated, - options=parsed_req.options, - constraint=parsed_req.constraint, - line_source=parsed_req.line_source, - user_supplied=user_supplied, - config_settings=config_settings, - ) - return req - - -def install_req_from_link_and_ireq( - link: Link, ireq: InstallRequirement -) -> InstallRequirement: - return InstallRequirement( - req=ireq.req, - comes_from=ireq.comes_from, - editable=ireq.editable, - link=link, - markers=ireq.markers, - use_pep517=ireq.use_pep517, - isolated=ireq.isolated, - install_options=ireq.install_options, - global_options=ireq.global_options, - hash_options=ireq.hash_options, - config_settings=ireq.config_settings, - user_supplied=ireq.user_supplied, - ) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py deleted file mode 100644 index ea363d86a564b5450666aa00aecd46353326a75a..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py +++ /dev/null @@ -1,170 +0,0 @@ -from contextlib import suppress -from io import TextIOWrapper - -from . import abc - - -class SpecLoaderAdapter: - """ - Adapt a package spec to adapt the underlying loader. - """ - - def __init__(self, spec, adapter=lambda spec: spec.loader): - self.spec = spec - self.loader = adapter(spec) - - def __getattr__(self, name): - return getattr(self.spec, name) - - -class TraversableResourcesLoader: - """ - Adapt a loader to provide TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - def get_resource_reader(self, name): - return CompatibilityFiles(self.spec)._native() - - -def _io_wrapper(file, mode='r', *args, **kwargs): - if mode == 'r': - return TextIOWrapper(file, *args, **kwargs) - elif mode == 'rb': - return file - raise ValueError( - "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) - ) - - -class CompatibilityFiles: - """ - Adapter for an existing or non-existent resource reader - to provide a compatibility .files(). - """ - - class SpecPath(abc.Traversable): - """ - Path tied to a module spec. - Can be read and exposes the resource reader children. - """ - - def __init__(self, spec, reader): - self._spec = spec - self._reader = reader - - def iterdir(self): - if not self._reader: - return iter(()) - return iter( - CompatibilityFiles.ChildPath(self._reader, path) - for path in self._reader.contents() - ) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - if not self._reader: - return CompatibilityFiles.OrphanPath(other) - return CompatibilityFiles.ChildPath(self._reader, other) - - @property - def name(self): - return self._spec.name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) - - class ChildPath(abc.Traversable): - """ - Path tied to a resource reader child. - Can be read but doesn't expose any meaningful children. 
- """ - - def __init__(self, reader, name): - self._reader = reader - self._name = name - - def iterdir(self): - return iter(()) - - def is_file(self): - return self._reader.is_resource(self.name) - - def is_dir(self): - return not self.is_file() - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(self.name, other) - - @property - def name(self): - return self._name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper( - self._reader.open_resource(self.name), mode, *args, **kwargs - ) - - class OrphanPath(abc.Traversable): - """ - Orphan path, not tied to a module spec or resource reader. - Can't be read and doesn't expose any meaningful children. - """ - - def __init__(self, *path_parts): - if len(path_parts) < 1: - raise ValueError('Need at least one path part to construct a path') - self._path = path_parts - - def iterdir(self): - return iter(()) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(*self._path, other) - - @property - def name(self): - return self._path[-1] - - def open(self, mode='r', *args, **kwargs): - raise FileNotFoundError("Can't open orphan path") - - def __init__(self, spec): - self.spec = spec - - @property - def _reader(self): - with suppress(AttributeError): - return self.spec.loader.get_resource_reader(self.spec.name) - - def _native(self): - """ - Return the native reader if it supports files(). - """ - reader = self._reader - return reader if hasattr(reader, 'files') else self - - def __getattr__(self, attr): - return getattr(self._reader, attr) - - def files(self): - return CompatibilityFiles.SpecPath(self.spec, self._reader) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. - """ - return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/dist_info.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/dist_info.py deleted file mode 100644 index 0685c94596f2e74642ecf57b33b6c20f937d03c0..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/dist_info.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Create a dist_info directory -As defined in the wheel specification -""" - -import os -import re -import shutil -import sys -import warnings -from contextlib import contextmanager -from inspect import cleandoc -from pathlib import Path - -from distutils.core import Command -from distutils import log -from setuptools.extern import packaging -from setuptools._deprecation_warning import SetuptoolsDeprecationWarning - - -class dist_info(Command): - - description = 'create a .dist-info directory' - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)" - " DEPRECATED: use --output-dir."), - ('output-dir=', 'o', "directory inside of which the .dist-info will be" - "created (default: top of the source tree)"), - ('tag-date', 'd', "Add date stamp (e.g. 
20050528) to version number"), - ('tag-build=', 'b', "Specify explicit tag to add to version number"), - ('no-date', 'D', "Don't include date stamp [default]"), - ('keep-egg-info', None, "*TRANSITIONAL* will be removed in the future"), - ] - - boolean_options = ['tag-date', 'keep-egg-info'] - negative_opt = {'no-date': 'tag-date'} - - def initialize_options(self): - self.egg_base = None - self.output_dir = None - self.name = None - self.dist_info_dir = None - self.tag_date = None - self.tag_build = None - self.keep_egg_info = False - - def finalize_options(self): - if self.egg_base: - msg = "--egg-base is deprecated for dist_info command. Use --output-dir." - warnings.warn(msg, SetuptoolsDeprecationWarning) - self.output_dir = self.egg_base or self.output_dir - - dist = self.distribution - project_dir = dist.src_root or os.curdir - self.output_dir = Path(self.output_dir or project_dir) - - egg_info = self.reinitialize_command("egg_info") - egg_info.egg_base = str(self.output_dir) - - if self.tag_date: - egg_info.tag_date = self.tag_date - else: - self.tag_date = egg_info.tag_date - - if self.tag_build: - egg_info.tag_build = self.tag_build - else: - self.tag_build = egg_info.tag_build - - egg_info.finalize_options() - self.egg_info = egg_info - - name = _safe(dist.get_name()) - version = _version(dist.get_version()) - self.name = f"{name}-{version}" - self.dist_info_dir = os.path.join(self.output_dir, f"{self.name}.dist-info") - - @contextmanager - def _maybe_bkp_dir(self, dir_path: str, requires_bkp: bool): - if requires_bkp: - bkp_name = f"{dir_path}.__bkp__" - _rm(bkp_name, ignore_errors=True) - _copy(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True) - try: - yield - finally: - _rm(dir_path, ignore_errors=True) - shutil.move(bkp_name, dir_path) - else: - yield - - def run(self): - self.output_dir.mkdir(parents=True, exist_ok=True) - self.egg_info.run() - egg_info_dir = self.egg_info.egg_info - assert os.path.isdir(egg_info_dir), ".egg-info dir should have been created" - - log.info("creating '{}'".format(os.path.abspath(self.dist_info_dir))) - bdist_wheel = self.get_finalized_command('bdist_wheel') - - # TODO: if bdist_wheel if merged into setuptools, just add "keep_egg_info" there - with self._maybe_bkp_dir(egg_info_dir, self.keep_egg_info): - bdist_wheel.egg2dist(egg_info_dir, self.dist_info_dir) - - -def _safe(component: str) -> str: - """Escape a component used to form a wheel name according to PEP 491""" - return re.sub(r"[^\w\d.]+", "_", component) - - -def _version(version: str) -> str: - """Convert an arbitrary string to a version string.""" - v = version.replace(' ', '.') - try: - return str(packaging.version.Version(v)).replace("-", "_") - except packaging.version.InvalidVersion: - msg = f"""Invalid version: {version!r}. - !!\n\n - ################### - # Invalid version # - ################### - {version!r} is not valid according to PEP 440.\n - Please make sure specify a valid version for your package. - Also note that future releases of setuptools may halt the build process - if an invalid version is given. - \n\n!! 
- """ - warnings.warn(cleandoc(msg)) - return _safe(v).strip("_") - - -def _rm(dir_name, **opts): - if os.path.isdir(dir_name): - shutil.rmtree(dir_name, **opts) - - -def _copy(src, dst, **opts): - if sys.version_info < (3, 8): - opts.pop("dirs_exist_ok", None) - shutil.copytree(src, dst, **opts) diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/model/model.py b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/model/model.py deleted file mode 100644 index 52938290b7ca895a7c71173d40f90df5cd51b0d0..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/model/model.py +++ /dev/null @@ -1,196 +0,0 @@ -import math -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -import torch.nn.init as init - -from .modules import InvertibleConv1x1 - - -def initialize_weights(net_l, scale=1): - if not isinstance(net_l, list): - net_l = [net_l] - for net in net_l: - for m in net.modules(): - if isinstance(m, nn.Conv2d): - init.kaiming_normal_(m.weight, a=0, mode="fan_in") - m.weight.data *= scale # for residual block - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - init.kaiming_normal_(m.weight, a=0, mode="fan_in") - m.weight.data *= scale - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - init.constant_(m.weight, 1) - init.constant_(m.bias.data, 0.0) - - -def initialize_weights_xavier(net_l, scale=1): - if not isinstance(net_l, list): - net_l = [net_l] - for net in net_l: - for m in net.modules(): - if isinstance(m, nn.Conv2d): - init.xavier_normal_(m.weight) - m.weight.data *= scale # for residual block - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - init.xavier_normal_(m.weight) - m.weight.data *= scale - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - init.constant_(m.weight, 1) - init.constant_(m.bias.data, 0.0) - - -class DenseBlock(nn.Module): - def __init__(self, channel_in, channel_out, init="xavier", gc=32, bias=True): - super(DenseBlock, self).__init__() - self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias) - self.conv2 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias) - self.conv3 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias) - self.conv4 = nn.Conv2d(channel_in + 3 * gc, gc, 3, 1, 1, bias=bias) - self.conv5 = nn.Conv2d(channel_in + 4 * gc, channel_out, 3, 1, 1, bias=bias) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - if init == "xavier": - initialize_weights_xavier( - [self.conv1, self.conv2, self.conv3, self.conv4], 0.1 - ) - else: - initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4], 0.1) - initialize_weights(self.conv5, 0) - - def forward(self, x): - x1 = self.lrelu(self.conv1(x)) - x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) - x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) - x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) - x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) - - return x5 - - -def subnet(net_structure, init="xavier"): - def constructor(channel_in, channel_out): - if net_structure == "DBNet": - if init == "xavier": - return DenseBlock(channel_in, channel_out, init) - else: - return DenseBlock(channel_in, channel_out) - # return UNetBlock(channel_in, channel_out) - else: - return None - - return constructor - - -class InvBlock(nn.Module): - def __init__(self, subnet_constructor, channel_num, 
channel_split_num, clamp=0.8): - super(InvBlock, self).__init__() - # channel_num: 3 - # channel_split_num: 1 - - self.split_len1 = channel_split_num # 1 - self.split_len2 = channel_num - channel_split_num # 2 - - self.clamp = clamp - - self.F = subnet_constructor(self.split_len2, self.split_len1) - self.G = subnet_constructor(self.split_len1, self.split_len2) - self.H = subnet_constructor(self.split_len1, self.split_len2) - - in_channels = 3 - self.invconv = InvertibleConv1x1(in_channels, LU_decomposed=True) - self.flow_permutation = lambda z, logdet, rev: self.invconv(z, logdet, rev) - - def forward(self, x, rev=False): - if not rev: - # invert1x1conv - x, logdet = self.flow_permutation(x, logdet=0, rev=False) - - # split to 1 channel and 2 channel. - x1, x2 = ( - x.narrow(1, 0, self.split_len1), - x.narrow(1, self.split_len1, self.split_len2), - ) - - y1 = x1 + self.F(x2) # 1 channel - self.s = self.clamp * (torch.sigmoid(self.H(y1)) * 2 - 1) - y2 = x2.mul(torch.exp(self.s)) + self.G(y1) # 2 channel - out = torch.cat((y1, y2), 1) - else: - # split. - x1, x2 = ( - x.narrow(1, 0, self.split_len1), - x.narrow(1, self.split_len1, self.split_len2), - ) - self.s = self.clamp * (torch.sigmoid(self.H(x1)) * 2 - 1) - y2 = (x2 - self.G(x1)).div(torch.exp(self.s)) - y1 = x1 - self.F(y2) - - x = torch.cat((y1, y2), 1) - - # inv permutation - out, logdet = self.flow_permutation(x, logdet=0, rev=True) - - return out - - -class InvISPNet(nn.Module): - def __init__( - self, - channel_in=3, - channel_out=3, - subnet_constructor=subnet("DBNet"), - block_num=8, - ): - super(InvISPNet, self).__init__() - operations = [] - - current_channel = channel_in - channel_num = channel_in - channel_split_num = 1 - - for j in range(block_num): - b = InvBlock( - subnet_constructor, channel_num, channel_split_num - ) # one block is one flow step. - operations.append(b) - - self.operations = nn.ModuleList(operations) - - self.initialize() - - def initialize(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - init.xavier_normal_(m.weight) - m.weight.data *= 1.0 # for residual block - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - init.xavier_normal_(m.weight) - m.weight.data *= 1.0 - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - init.constant_(m.weight, 1) - init.constant_(m.bias.data, 0.0) - - def forward(self, x, rev=False): - out = x # x: [N,3,H,W] - - if not rev: - for op in self.operations: - out = op.forward(out, rev) - else: - for op in reversed(self.operations): - out = op.forward(out, rev) - - return out diff --git a/spaces/Riksarkivet/htr_demo/.github/README.md b/spaces/Riksarkivet/htr_demo/.github/README.md deleted file mode 100644 index 2cb53a551e08bfcc30de7abf37a196dc00874c98..0000000000000000000000000000000000000000 --- a/spaces/Riksarkivet/htr_demo/.github/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# HTRFLOW: A demo app for HTR - -We're thrilled to introduce [HTRFLOW](https://huggingface.co/spaces/Riksarkivet/htr_demo), our demonstration platform that brings to life the process of transcribing Swedish handwritten documents from the 17th to the 19th century. - -

    - [HTRFLOW Image]

    - -HTRFLOW is designed to provide users with a step-by-step visualization of the HTR-process, and offer non-expert users an inside look into the workings of an AI-transcription pipeline. -At the moment HTRFLOW is mainly a demo-application. It’s not intended for production, but instead to showcase the immense possibilities that HTR-technology is opening up for cultural heritage institutions around the world. - -All code is open-source, all our models are on [Hugging Face](https://huggingface.co/collections/Riksarkivet/models-for-handwritten-text-recognition-652692c6871f915e766de688) and are free to use, and all data will be made available for download and use on [Hugging Face](https://huggingface.co/datasets/Riksarkivet/placeholder_htr) as well. - -HTRFLOW is more than just a demo; it's a testament to the advancement of open source development of HTR. As we progress, the app will be renamed into HTRFLOW.app and HTRFLOW will evolve into multiple parts. HTRFLOW will become our foundational library that will serve as the backbone for a range of applications in the transcription domain. Note that the backend (src) for the app will be rewritten and packaged to be more optimized under the project name [HTR_SVEA](https://github.com/Borg93/htr_svea) (possibly renamed into HTRFLOW.core). - -## Run app - -Use virtual env. - -``` -python3 -m venv .venv -source .venv/bin/activate -``` - -Install libraries with Makefile: - -``` -make install -``` - -With pip: - -``` -pip install -r requirements.txt -``` - -Run app with: - -``` -gradio app.py -``` - -## Run with Docker - -There are two options: - -### Run with Docker locally - -Build container: - -``` -docker build --tag htrflow/htrflow-app . -``` - -Run container: - -``` -docker run -it -d --name htrflow-app -p 7000:7860 htrflow/htrflow-app:latest -``` - -### Run with Docker with HF - -You can also just run it from Hugging Face: - -``` -docker run -it -p 7860:7860 --platform=linux/amd64 --gpus all \ - -e registry.hf.space/riksarkivet-htr-demo:latest -``` diff --git a/spaces/Ritori/Yura_GPT/yura gpt gradio.py b/spaces/Ritori/Yura_GPT/yura gpt gradio.py deleted file mode 100644 index dd5a9affe25a563283161c9d1eb3980be1c57ecc..0000000000000000000000000000000000000000 --- a/spaces/Ritori/Yura_GPT/yura gpt gradio.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -os.system('pip install transformers') -os.system('pip install torch') -os.system('pip install tensorflow') -os.system('pip install flax') - -import gradio as gr -import random -from transformers import GPT2LMHeadModel, GPT2Tokenizer - -def remove_ellipsis(text): - ellipsis_index = text.find("……") - - if ellipsis_index != -1: - text = text[ellipsis_index + 3:] - - return text - -def visual_novel_interface(input_text): - - tokenizer = GPT2Tokenizer.from_pretrained("gpt2") - model = GPT2LMHeadModel.from_pretrained("./extracted_files/checkpoint-10000") - - # 对一个句子进行编码 - input_ids = tokenizer.encode(input_text, return_tensors='pt') - - # 生成文本 - output = model.generate(input_ids, max_length=100, num_return_sequences=1, temperature=0.7) - - # 解码生成的文本 - generated_text = tokenizer.decode(output[0], skip_special_tokens=True) - - output_text = remove_ellipsis(generated_text) - #print(type(output_text)) - - return output_text - -iface = gr.Interface( - fn=visual_novel_interface, - inputs="text", - outputs=[ "text"] - ) - -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py 
b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py deleted file mode 100644 index b45e758ac6cf8dfb0382d072fe09125bc7e9b888..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -from torch import nn -from torch.nn import functional as F - -from .registry import CONV_LAYERS - - -@CONV_LAYERS.register_module() -class Conv2dAdaptivePadding(nn.Conv2d): - """Implementation of 2D convolution in tensorflow with `padding` as "same", - which applies padding to input (if needed) so that input image gets fully - covered by filter and stride you specified. For stride 1, this will ensure - that output image size is same as input. For stride of 2, output dimensions - will be half, for example. - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 0 - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 1 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If ``True``, adds a learnable bias to the - output. Default: ``True`` - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True): - super().__init__(in_channels, out_channels, kernel_size, stride, 0, - dilation, groups, bias) - - def forward(self, x): - img_h, img_w = x.size()[-2:] - kernel_h, kernel_w = self.weight.size()[-2:] - stride_h, stride_w = self.stride - output_h = math.ceil(img_h / stride_h) - output_w = math.ceil(img_w / stride_w) - pad_h = ( - max((output_h - 1) * self.stride[0] + - (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)) - pad_w = ( - max((output_w - 1) * self.stride[1] + - (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)) - if pad_h > 0 or pad_w > 0: - x = F.pad(x, [ - pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 - ]) - return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, - self.dilation, self.groups) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fused_semantic_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fused_semantic_head.py deleted file mode 100644 index 2aa6033eec17a30aeb68c0fdd218d8f0d41157e8..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fused_semantic_head.py +++ /dev/null @@ -1,107 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, kaiming_init -from mmcv.runner import auto_fp16, force_fp32 - -from mmdet.models.builder import HEADS - - -@HEADS.register_module() -class FusedSemanticHead(nn.Module): - r"""Multi-level fused semantic segmentation head. - - .. 
code-block:: none - - in_1 -> 1x1 conv --- - | - in_2 -> 1x1 conv -- | - || - in_3 -> 1x1 conv - || - ||| /-> 1x1 conv (mask prediction) - in_4 -> 1x1 conv -----> 3x3 convs (*4) - | \-> 1x1 conv (feature) - in_5 -> 1x1 conv --- - """ # noqa: W605 - - def __init__(self, - num_ins, - fusion_level, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=183, - ignore_label=255, - loss_weight=0.2, - conv_cfg=None, - norm_cfg=None): - super(FusedSemanticHead, self).__init__() - self.num_ins = num_ins - self.fusion_level = fusion_level - self.num_convs = num_convs - self.in_channels = in_channels - self.conv_out_channels = conv_out_channels - self.num_classes = num_classes - self.ignore_label = ignore_label - self.loss_weight = loss_weight - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - - self.lateral_convs = nn.ModuleList() - for i in range(self.num_ins): - self.lateral_convs.append( - ConvModule( - self.in_channels, - self.in_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=False)) - - self.convs = nn.ModuleList() - for i in range(self.num_convs): - in_channels = self.in_channels if i == 0 else conv_out_channels - self.convs.append( - ConvModule( - in_channels, - conv_out_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.conv_embedding = ConvModule( - conv_out_channels, - conv_out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) - - self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label) - - def init_weights(self): - kaiming_init(self.conv_logits) - - @auto_fp16() - def forward(self, feats): - x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) - fused_size = tuple(x.shape[-2:]) - for i, feat in enumerate(feats): - if i != self.fusion_level: - feat = F.interpolate( - feat, size=fused_size, mode='bilinear', align_corners=True) - x += self.lateral_convs[i](feat) - - for i in range(self.num_convs): - x = self.convs[i](x) - - mask_pred = self.conv_logits(x) - x = self.conv_embedding(x) - return mask_pred, x - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, labels): - labels = labels.squeeze(1).long() - loss_semantic_seg = self.criterion(mask_pred, labels) - loss_semantic_seg *= self.loss_weight - return loss_semantic_seg diff --git a/spaces/RuijiaTan/MultiPrincipalElementAlloyPropertyPredictor/Interface/empirical_parameter_calculator.py b/spaces/RuijiaTan/MultiPrincipalElementAlloyPropertyPredictor/Interface/empirical_parameter_calculator.py deleted file mode 100644 index 98fdec0f69c18e3a8909f4c21dd306662e06c365..0000000000000000000000000000000000000000 --- a/spaces/RuijiaTan/MultiPrincipalElementAlloyPropertyPredictor/Interface/empirical_parameter_calculator.py +++ /dev/null @@ -1,284 +0,0 @@ -"""empirical parameter calculator, includes calculation functions for 14 values: -'Enthalpy(kJ/mol)', 'std_enthalpy(kJ/mol)', 'average_atomic_radius', 'Delta(%)', 'Omega', 'Entropy(J/K*mol)', 'Tm(K)', -'std_Tm (%)', 'X', 'std_X(%)', 'VEC', 'std_VEC', 'Density(g/com^3)', 'Price(USD/kg)' -written by Will Nash and Zhipeng Li -version 2.1.1""" - -import itertools -import numpy as np -from pip._internal import main - -# main(['install', 'joblib']) -main(['install', 'matminer']) - -import matminer.utils.data as mm_data - -# the market price for most chemical elements, these data are retrieved from -# http://www.leonland.de/elements_by_price/en/list -price_dic = { - 
"Ag": 462, "Al": 1.91, "Au": 38189, "B": 2386, "Be": 831.6, "Bi": 10.34, "C": 24, "Ca": 5.93, "Cd": 1.98, "Ce": 7, - "Co": 59.5, "Cr": 7.64, "Cu": 5.9, "Dy": 350, "Er": 95, "Fe": 0.08, "Gd": 55, "Ge": 1833, "Hf": 1414, "Ho": 1400, - "In": 341.6, "Ir": 31186, "La": 7, "Li": 115.7, "Lu": 6269, "Mg": 2.26, "Mn": 2.06, "Mo": 16, "Nb": 42, "Nd": 60, - "Ni": 9.19, "Os": 12860, "P": 300, "Pb": 2.29, "Pd": 34401, "Pr": 85, "Pt": 26492, "Re": 1635, "Rh": 76840, - "Ru": 14720, "Sb": 7.05, "Sc": 15000, "Si": 1.91, "Sm": 14.35, "Sn": 20, "Sr": 5.4, "Ta": 238, "Tb": 550, - "Ti": 3.77, "Tm": 6200, "V": 22.6, "W": 25.52, "Y": 35, "Yb": 1600, "Zn": 2.83, "Zr": 23.14, "H": 23.64, - "He": 40.39, "N": 2.77, "O": 0.64, "F": 1900, "Ne": 629.9, "Na": 3.04, "S": 0.1, "Cl": 1.5, "Ar": 2.56, "K": 13.02, - "Ga": 278.2, "As": 1.74, "Se": 30.37, "Br": 4.4, "Kr": 1.4, "Rb": 14720, "Te": 55.68, "I": 28.00, - "Xe": 9.2, "Cs": 73400, "Ba": 550, "Eu": 258, "Hg": 38.44, "Tl": 7400, "Th": 176, "U": 57.76, -} - - -class EmpiricalParams(object): - """functions for returning the empirical parameters of alloy compositions where element list is a list of pymatgen - Elements that are in the alloy, and mol_ratio is their respective mole ratios """ - - def __init__(self, element_list, mol_ratio=None): - self.element_list = element_list - if mol_ratio is None: # assume that mol_ratio is evenly distributed amongst elements - mol_ratio = [1 / len(element_list)] * len(element_list) - self.mol_ratio = np.divide(mol_ratio, np.sum(mol_ratio)) - self.a = self.mean_atomic_radius() - self.delta = self.atomic_size_difference() - self.Tm = self.average_melting_point() - self.mix_entropy = self.entropy_mixing() - self.mix_enthalpy = self.enthalpy_mixing() - self.omega = self.calc_omega() - self.x = self.mean_electronegativity() - self.std_x = self.std_electronegativity() - self.vec = self.average_vec() - self.density = self.calc_density() - self.price = self.calc_price() - - # self.k = self.mean_bulk_modulus() - # self.std_k = self.std_bulk_modulus() - self.std_enthalpy = self.std_enthalpy_mixing() - self.std_Tm = self.std_melting_point() - self.vec_std = self.std_vec() - - ''' - 2. average atomic radius - ''' - def mean_atomic_radius(self): - """function to return the mean atomic size radius 平均原子尺寸半径 (a) of the alloy""" - radii = [] - for i in range(len(self.element_list)): - radii.append(self.element_list[i].atomic_radius) - avg_radii = np.dot(radii, self.mol_ratio) - return avg_radii - - ''' - 3. atomic size difference - ''' - def atomic_size_difference(self): - """function to return the atomic size difference 原子半径差比率 (delta) of the alloy""" - delta = 0 - radii = [] - for i in range(len(self.element_list)): - radii.append(self.element_list[i].atomic_radius) - - for j in range(len(self.element_list)): - delta += self.mol_ratio[j] * np.square((1 - np.divide(radii[j], self.a))) - - return np.sqrt(delta) - - ''' - 6. average melting point - ''' - def average_melting_point(self): - """function to return the average melting point 熔点的均值 (Tm) of the alloy""" - Tm = 0 - for i in range(len(self.element_list)): - Tm += self.mol_ratio[i] * self.element_list[i].melting_point - return Tm - - ''' - 7. 
standard melting point - ''' - def std_melting_point(self): - """function to return the standard deviation (in percentage) of melting points 熔点的标准偏差 (sigma_t) of the alloy""" - sigma_t = 0 - T = [] - for i in range(len(self.element_list)): - T.append(self.element_list[i].melting_point) - - for j in range(len(self.element_list)): - sigma_t += self.mol_ratio[j] * np.square((1 - np.divide(T[j], self.Tm))) - return np.sqrt(sigma_t) - - ''' - 1. entropy of mixing - ''' - def entropy_mixing(self): - """function to return entropy of mixing 混合熵 for alloy elements based on Boltzmann's hypothesis""" - entropy = 0 - for i in range(len(self.mol_ratio)): - if self.mol_ratio[i] > 0: - entropy += self.mol_ratio[i] * np.log(self.mol_ratio[i]) - return -8.31446261815324 * entropy - - ''' - 4. enthalpy of mixing - ''' - def enthalpy_mixing(self): - """function to return the sum enthalpy of mixing 混合焓和 of an alloy system based on binary mixtures and the molar - ratio """ - enthalpies = [] - mol_coefficients = [] - - for pair in itertools.combinations(self.element_list, 2): - enthalpies.append(mm_data.MixingEnthalpy().get_mixing_enthalpy(*pair)) - - for molies in itertools.combinations(self.mol_ratio, 2): - mol_coefficients.append(4 * np.product(molies)) - - enthalpy = np.dot(enthalpies, mol_coefficients) - return enthalpy - - ''' - 5. standard deviation of enthalpy - ''' - def std_enthalpy_mixing(self): - """function to return the standard deviation of enthalpy of mixing 混合焓的标准偏差 (sigma_h) of the alloy""" - sigma_h = 0 - H = np.zeros((len(self.element_list), len(self.element_list))) - for i in range(len(self.element_list)): - for j in range(len(self.element_list)): - if i != j: - H[i][j] = mm_data.MixingEnthalpy().get_mixing_enthalpy(self.element_list[i], self.element_list[j]) - - for i in range(len(self.element_list)): - for j in range(len(self.element_list)): - if i != j: - sigma_h += self.mol_ratio[i] * self.mol_ratio[j] * np.square(H[i][j] - self.enthalpy_mixing()) - sigma_h = sigma_h / 2 - return np.sqrt(sigma_h) - - ''' - 12. Omega omega - ''' - def calc_omega(self): - """function to return the omega value of the alloy""" - if np.abs(self.mix_enthalpy) < 1e-6: - self.mix_enthalpy = 1e-6 - return self.Tm * self.mix_entropy / (np.abs(self.mix_enthalpy) * 1000) - - ''' - 8. average electronegativity - ''' - def mean_electronegativity(self): - """function to return the mean electronegativity 电负性的均值 (x) of the alloy""" - x_list = [] - for i in range(len(self.element_list)): - x_list.append(self.element_list[i].X) - x_avg = np.dot(x_list, self.mol_ratio) - return x_avg - - ''' - 9. standard deviation of electronegativity - ''' - def std_electronegativity(self): - """function to return the standard deviation (in percentage) of electronegativity 电负性的标准偏差 (sigma_x) of the alloy""" - sigma_x = 0 - x_list = [] - for i in range(len(self.element_list)): - x_list.append(self.element_list[i].X) - - for j in range(len(self.element_list)): - sigma_x += self.mol_ratio[j] * np.square(x_list[j] - self.x) - return np.sqrt(sigma_x) / self.x - - ''' - 10. 
valence electron concentration - ''' - def num_ve(self, element): - """function to return the number of valence electron of the element元素的价电子""" - e_structure = element.full_electronic_structure - outer = element.full_electronic_structure[-1][0] - num_e = 0 - for t in e_structure: - if t[0] == outer - 1 and t[1] == 'd': - num_e += t[2] - if t[0] == outer: - num_e += t[2] - return num_e - - ''' - average of valence electron concentration - ''' - def average_vec(self): - """function to return the average of valence electron concentration 价电子浓度的均值 (vec) of the alloy""" - vec = 0 - for i in range(len(self.element_list)): - vec += self.mol_ratio[i] * self.num_ve(self.element_list[i]) - return vec - - ''' - 11. standard deviation of valence electron concentration - ''' - def std_vec(self): - """function to return the standard deviation of valence electron concentration 价电子浓度的标准偏差 (sigma_vec) of the alloy""" - sigma_vec = 0 - vec_list = [] - for i in range(len(self.element_list)): - vec_list.append(self.num_ve(self.element_list[i])) - - for j in range(len(self.element_list)): - sigma_vec += self.mol_ratio[j] * np.square(vec_list[j] - self.vec) - return np.sqrt(sigma_vec) - - ''' - average of bulk modulus - ''' - def mean_bulk_modulus(self): - """function to return the average of bulk modulus (k)体积弹性模量 of the alloy""" - k = 0 - for i in range(len(self.element_list)): - if self.element_list[i].bulk_modulus is None: - print(self.element_list[i]) - else: - k += self.mol_ratio[i] * self.element_list[i].bulk_modulus - return k - - ''' - standard deviation of bulk modulus - ''' - def std_bulk_modulus(self): - """function to return the standard deviation of bulk modulus (k)体积弹性模量 of the alloy""" - sigma_k = 0 - k_list = [] - for i in range(len(self.element_list)): - k_list.append(self.element_list[i].bulk_modulus) - - for j in range(len(self.element_list)): - if self.element_list[i].bulk_modulus is None: - print(self.element_list[i]) - else: - sigma_k += self.mol_ratio[j] * np.square(k_list[j] - self.k) - return np.sqrt(sigma_k) - - ''' - 13. density - ''' - def calc_density(self): - """function to return the density (g/cm^3) of the alloy""" - mass = 0 - volume = 0 - for i in range(len(self.element_list)): - mass += float(self.element_list[i].atomic_mass) * self.mol_ratio[i] - volume += self.mol_ratio[i] * self.element_list[i].molar_volume - return mass / volume - - ''' - 14. 
price/element cost - ''' - def calc_price(self): - """function to return the price (USD/kg) of the alloy""" - total_mass = 0 - total_price = 0 - for i in range(len(self.element_list)): - if not str(self.element_list[i]) in price_dic: - return 'unknown' - mass = float(self.element_list[i].atomic_mass) * self.mol_ratio[i] - total_mass += mass - total_price += mass * price_dic[str(self.element_list[i])] - return format(total_price / total_mass, '.2f') diff --git a/spaces/SUPERpuper/Text-to-image-AI-3/README.md b/spaces/SUPERpuper/Text-to-image-AI-3/README.md deleted file mode 100644 index f374375290397698a039ffb7e7bfb494c59fb6a1..0000000000000000000000000000000000000000 --- a/spaces/SUPERpuper/Text-to-image-AI-3/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text To Image AI 3 -emoji: 📈 -colorFrom: red -colorTo: purple -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection/models/common.py b/spaces/Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection/models/common.py deleted file mode 100644 index 111af708dea55cb11c8da3bb22d69e659ee78925..0000000000000000000000000000000000000000 --- a/spaces/Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection/models/common.py +++ /dev/null @@ -1,2019 +0,0 @@ -import math -from copy import copy -from pathlib import Path - -import numpy as np -import pandas as pd -import requests -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision.ops import DeformConv2d -from PIL import Image -from torch.cuda import amp - -from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh -from utils.plots import color_list, plot_one_box -from utils.torch_utils import time_synchronized - - -##### basic #### - -def autopad(k, p=None): # kernel, padding - # Pad to 'same' - if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad - return p - - -class MP(nn.Module): - def __init__(self, k=2): - super(MP, self).__init__() - self.m = nn.MaxPool2d(kernel_size=k, stride=k) - - def forward(self, x): - return self.m(x) - - -class SP(nn.Module): - def __init__(self, k=3, s=1): - super(SP, self).__init__() - self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2) - - def forward(self, x): - return self.m(x) - - -class ReOrg(nn.Module): - def __init__(self): - super(ReOrg, self).__init__() - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) - - -class Concat(nn.Module): - def __init__(self, dimension=1): - super(Concat, self).__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class Chuncat(nn.Module): - def __init__(self, dimension=1): - super(Chuncat, self).__init__() - self.d = dimension - - def forward(self, x): - x1 = [] - x2 = [] - for xi in x: - xi1, xi2 = xi.chunk(2, self.d) - x1.append(xi1) - x2.append(xi2) - return torch.cat(x1+x2, self.d) - - -class Shortcut(nn.Module): - def __init__(self, dimension=0): - super(Shortcut, self).__init__() - self.d = dimension - - def forward(self, x): - return x[0]+x[1] - - -class Foldcut(nn.Module): - def __init__(self, dimension=0): - super(Foldcut, self).__init__() - self.d = dimension - - def forward(self, x): - x1, x2 = x.chunk(2, self.d) - return x1+x2 - - -class 
Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Conv, self).__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def fuseforward(self, x): - return self.act(self.conv(x)) - - -class RobustConv(nn.Module): - # Robust convolution (use high kernel size 7-11 for: downsampling and other layers). Train for 300 - 450 epochs. - def __init__(self, c1, c2, k=7, s=1, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups - super(RobustConv, self).__init__() - self.conv_dw = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) - self.conv1x1 = nn.Conv2d(c1, c2, 1, 1, 0, groups=1, bias=True) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None - - def forward(self, x): - x = x.to(memory_format=torch.channels_last) - x = self.conv1x1(self.conv_dw(x)) - if self.gamma is not None: - x = x.mul(self.gamma.reshape(1, -1, 1, 1)) - return x - - -class RobustConv2(nn.Module): - # Robust convolution 2 (use [32, 5, 2] or [32, 7, 4] or [32, 11, 8] for one of the paths in CSP). - def __init__(self, c1, c2, k=7, s=4, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups - super(RobustConv2, self).__init__() - self.conv_strided = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) - self.conv_deconv = nn.ConvTranspose2d(in_channels=c1, out_channels=c2, kernel_size=s, stride=s, - padding=0, bias=True, dilation=1, groups=1 - ) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None - - def forward(self, x): - x = self.conv_deconv(self.conv_strided(x)) - if self.gamma is not None: - x = x.mul(self.gamma.reshape(1, -1, 1, 1)) - return x - - -def DWConv(c1, c2, k=1, s=1, act=True): - # Depthwise convolution - return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super(GhostConv, self).__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) - - -class Stem(nn.Module): - # Stem - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Stem, self).__init__() - c_ = int(c2/2) # hidden channels - self.cv1 = Conv(c1, c_, 3, 2) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(c_, c_, 3, 2) - self.pool = torch.nn.MaxPool2d(2, stride=2) - self.cv4 = Conv(2 * c_, c2, 1, 1) - - def forward(self, x): - x = self.cv1(x) - return self.cv4(torch.cat((self.cv3(self.cv2(x)), self.pool(x)), dim=1)) - - -class DownC(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, n=1, k=2): - super(DownC, self).__init__() - c_ = int(c1) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2//2, 3, k) - self.cv3 = Conv(c1, c2//2, 1, 1) - self.mp = nn.MaxPool2d(kernel_size=k, stride=k) - - def forward(self, x): - return torch.cat((self.cv2(self.cv1(x)), 
self.cv3(self.mp(x))), dim=1) - - -class SPP(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, k=(5, 9, 13)): - super(SPP, self).__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - - def forward(self, x): - x = self.cv1(x) - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) - - -class Bottleneck(nn.Module): - # Darknet bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Bottleneck, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2, 3, 1, g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class Res(nn.Module): - # ResNet bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Res, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 3, 1, g=g) - self.cv3 = Conv(c_, c2, 1, 1) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv3(self.cv2(self.cv1(x))) if self.add else self.cv3(self.cv2(self.cv1(x))) - - -class ResX(Res): - # ResNet bottleneck - def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - - -class Ghost(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super(Ghost, self).__init__() - c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - -##### end of basic ##### - - -##### cspnet ##### - -class SPPCSPC(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): - super(SPPCSPC, self).__init__() - c_ = int(2 * c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 3, 1) - self.cv4 = Conv(c_, c_, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - self.cv5 = Conv(4 * c_, c_, 1, 1) - self.cv6 = Conv(c_, c_, 3, 1) - self.cv7 = Conv(2 * c_, c2, 1, 1) - - def forward(self, x): - x1 = self.cv4(self.cv3(self.cv1(x))) - y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1))) - y2 = self.cv2(x) - return self.cv7(torch.cat((y1, y2), dim=1)) - -class GhostSPPCSPC(SPPCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): - super().__init__(c1, c2, n, shortcut, g, e, k) - c_ = int(2 * c2 * e) # hidden channels - self.cv1 = GhostConv(c1, c_, 1, 1) - self.cv2 = GhostConv(c1, c_, 1, 1) - self.cv3 = GhostConv(c_, c_, 3, 1) - self.cv4 = GhostConv(c_, c_, 1, 1) - self.cv5 = GhostConv(4 * c_, c_, 1, 1) - self.cv6 = GhostConv(c_, c_, 3, 1) - self.cv7 = GhostConv(2 * 
c_, c2, 1, 1) - - -class GhostStem(Stem): - # Stem - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, p, g, act) - c_ = int(c2/2) # hidden channels - self.cv1 = GhostConv(c1, c_, 3, 2) - self.cv2 = GhostConv(c_, c_, 1, 1) - self.cv3 = GhostConv(c_, c_, 3, 2) - self.cv4 = GhostConv(2 * c_, c2, 1, 1) - - -class BottleneckCSPA(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSPA, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.m(self.cv1(x)) - y2 = self.cv2(x) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class BottleneckCSPB(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSPB, self).__init__() - c_ = int(c2) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - x1 = self.cv1(x) - y1 = self.m(x1) - y2 = self.cv2(x1) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class BottleneckCSPC(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSPC, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 1, 1) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(torch.cat((y1, y2), dim=1)) - - -class ResCSPA(BottleneckCSPA): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class ResCSPB(BottleneckCSPB): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class ResCSPC(BottleneckCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class ResXCSPA(ResCSPA): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, 
shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class ResXCSPB(ResCSPB): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class ResXCSPC(ResCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class GhostCSPA(BottleneckCSPA): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) - - -class GhostCSPB(BottleneckCSPB): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) - - -class GhostCSPC(BottleneckCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) - -##### end of cspnet ##### - - -##### yolor ##### - -class ImplicitA(nn.Module): - def __init__(self, channel, mean=0., std=.02): - super(ImplicitA, self).__init__() - self.channel = channel - self.mean = mean - self.std = std - self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) - nn.init.normal_(self.implicit, mean=self.mean, std=self.std) - - def forward(self, x): - return self.implicit + x - - -class ImplicitM(nn.Module): - def __init__(self, channel, mean=0., std=.02): - super(ImplicitM, self).__init__() - self.channel = channel - self.mean = mean - self.std = std - self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1)) - nn.init.normal_(self.implicit, mean=self.mean, std=self.std) - - def forward(self, x): - return self.implicit * x - -##### end of yolor ##### - - -##### repvgg ##### - -class RepConv(nn.Module): - # Represented convolution - # https://arxiv.org/abs/2101.03697 - - def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=False): - super(RepConv, self).__init__() - - self.deploy = deploy - self.groups = g - self.in_channels = c1 - self.out_channels = c2 - - assert k == 3 - assert autopad(k, p) == 1 - - padding_11 = autopad(k, p) - k // 2 - - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - if deploy: - self.rbr_reparam = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=True) - - else: - self.rbr_identity = (nn.BatchNorm2d(num_features=c1) if c2 == c1 and s == 1 else None) - - self.rbr_dense = nn.Sequential( - 
nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False), - nn.BatchNorm2d(num_features=c2), - ) - - self.rbr_1x1 = nn.Sequential( - nn.Conv2d( c1, c2, 1, s, padding_11, groups=g, bias=False), - nn.BatchNorm2d(num_features=c2), - ) - - def forward(self, inputs): - if hasattr(self, "rbr_reparam"): - return self.act(self.rbr_reparam(inputs)) - - if self.rbr_identity is None: - id_out = 0 - else: - id_out = self.rbr_identity(inputs) - - return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out) - - def get_equivalent_kernel_bias(self): - kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) - kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) - kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) - return ( - kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, - bias3x3 + bias1x1 + biasid, - ) - - def _pad_1x1_to_3x3_tensor(self, kernel1x1): - if kernel1x1 is None: - return 0 - else: - return nn.functional.pad(kernel1x1, [1, 1, 1, 1]) - - def _fuse_bn_tensor(self, branch): - if branch is None: - return 0, 0 - if isinstance(branch, nn.Sequential): - kernel = branch[0].weight - running_mean = branch[1].running_mean - running_var = branch[1].running_var - gamma = branch[1].weight - beta = branch[1].bias - eps = branch[1].eps - else: - assert isinstance(branch, nn.BatchNorm2d) - if not hasattr(self, "id_tensor"): - input_dim = self.in_channels // self.groups - kernel_value = np.zeros( - (self.in_channels, input_dim, 3, 3), dtype=np.float32 - ) - for i in range(self.in_channels): - kernel_value[i, i % input_dim, 1, 1] = 1 - self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) - kernel = self.id_tensor - running_mean = branch.running_mean - running_var = branch.running_var - gamma = branch.weight - beta = branch.bias - eps = branch.eps - std = (running_var + eps).sqrt() - t = (gamma / std).reshape(-1, 1, 1, 1) - return kernel * t, beta - running_mean * gamma / std - - def repvgg_convert(self): - kernel, bias = self.get_equivalent_kernel_bias() - return ( - kernel.detach().cpu().numpy(), - bias.detach().cpu().numpy(), - ) - - def fuse_conv_bn(self, conv, bn): - - std = (bn.running_var + bn.eps).sqrt() - bias = bn.bias - bn.running_mean * bn.weight / std - - t = (bn.weight / std).reshape(-1, 1, 1, 1) - weights = conv.weight * t - - bn = nn.Identity() - conv = nn.Conv2d(in_channels = conv.in_channels, - out_channels = conv.out_channels, - kernel_size = conv.kernel_size, - stride=conv.stride, - padding = conv.padding, - dilation = conv.dilation, - groups = conv.groups, - bias = True, - padding_mode = conv.padding_mode) - - conv.weight = torch.nn.Parameter(weights) - conv.bias = torch.nn.Parameter(bias) - return conv - - def fuse_repvgg_block(self): - if self.deploy: - return - print(f"RepConv.fuse_repvgg_block") - - self.rbr_dense = self.fuse_conv_bn(self.rbr_dense[0], self.rbr_dense[1]) - - self.rbr_1x1 = self.fuse_conv_bn(self.rbr_1x1[0], self.rbr_1x1[1]) - rbr_1x1_bias = self.rbr_1x1.bias - weight_1x1_expanded = torch.nn.functional.pad(self.rbr_1x1.weight, [1, 1, 1, 1]) - - # Fuse self.rbr_identity - if (isinstance(self.rbr_identity, nn.BatchNorm2d) or isinstance(self.rbr_identity, nn.modules.batchnorm.SyncBatchNorm)): - # print(f"fuse: rbr_identity == BatchNorm2d or SyncBatchNorm") - identity_conv_1x1 = nn.Conv2d( - in_channels=self.in_channels, - out_channels=self.out_channels, - kernel_size=1, - stride=1, - padding=0, - groups=self.groups, - bias=False) - identity_conv_1x1.weight.data = 
identity_conv_1x1.weight.data.to(self.rbr_1x1.weight.data.device) - identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.squeeze().squeeze() - # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") - identity_conv_1x1.weight.data.fill_(0.0) - identity_conv_1x1.weight.data.fill_diagonal_(1.0) - identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.unsqueeze(2).unsqueeze(3) - # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") - - identity_conv_1x1 = self.fuse_conv_bn(identity_conv_1x1, self.rbr_identity) - bias_identity_expanded = identity_conv_1x1.bias - weight_identity_expanded = torch.nn.functional.pad(identity_conv_1x1.weight, [1, 1, 1, 1]) - else: - # print(f"fuse: rbr_identity != BatchNorm2d, rbr_identity = {self.rbr_identity}") - bias_identity_expanded = torch.nn.Parameter( torch.zeros_like(rbr_1x1_bias) ) - weight_identity_expanded = torch.nn.Parameter( torch.zeros_like(weight_1x1_expanded) ) - - - #print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ") - #print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ") - #print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ") - - self.rbr_dense.weight = torch.nn.Parameter(self.rbr_dense.weight + weight_1x1_expanded + weight_identity_expanded) - self.rbr_dense.bias = torch.nn.Parameter(self.rbr_dense.bias + rbr_1x1_bias + bias_identity_expanded) - - self.rbr_reparam = self.rbr_dense - self.deploy = True - - if self.rbr_identity is not None: - del self.rbr_identity - self.rbr_identity = None - - if self.rbr_1x1 is not None: - del self.rbr_1x1 - self.rbr_1x1 = None - - if self.rbr_dense is not None: - del self.rbr_dense - self.rbr_dense = None - - -class RepBottleneck(Bottleneck): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut=True, g=1, e=0.5) - c_ = int(c2 * e) # hidden channels - self.cv2 = RepConv(c_, c2, 3, 1, g=g) - - -class RepBottleneckCSPA(BottleneckCSPA): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class RepBottleneckCSPB(BottleneckCSPB): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class RepBottleneckCSPC(BottleneckCSPC): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class RepRes(Res): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.cv2 = RepConv(c_, c_, 3, 1, g=g) - - -class RepResCSPA(ResCSPA): - # CSP Bottleneck 
https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResCSPB(ResCSPB): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResCSPC(ResCSPC): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResX(ResX): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.cv2 = RepConv(c_, c_, 3, 1, g=g) - - -class RepResXCSPA(ResXCSPA): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResXCSPB(ResXCSPB): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResXCSPC(ResXCSPC): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - -##### end of repvgg ##### - - -##### transformer ##### - -class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) - def __init__(self, c, num_heads): - super().__init__() - self.q = nn.Linear(c, c, bias=False) - self.k = nn.Linear(c, c, bias=False) - self.v = nn.Linear(c, c, bias=False) - self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) - self.fc1 = nn.Linear(c, c, bias=False) - self.fc2 = nn.Linear(c, c, bias=False) - - def forward(self, x): - x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x - x = self.fc2(self.fc1(x)) + x - return x - - -class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 - def __init__(self, c1, c2, num_heads, num_layers): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) 
for _ in range(num_layers)]) - self.c2 = c2 - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - b, _, w, h = x.shape - p = x.flatten(2) - p = p.unsqueeze(0) - p = p.transpose(0, 3) - p = p.squeeze(3) - e = self.linear(p) - x = p + e - - x = self.tr(x) - x = x.unsqueeze(3) - x = x.transpose(0, 3) - x = x.reshape(b, self.c2, w, h) - return x - -##### end of transformer ##### - - -##### yolov5 ##### - -class Focus(nn.Module): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Focus, self).__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) - # self.contract = Contract(gain=2) - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) - # return self.conv(self.contract(x)) - - -class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher - def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * 4, c2, 1, 1) - self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) - - def forward(self, x): - x = self.cv1(x) - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) - - -class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' - s = self.gain - x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) - x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) - - -class Expand(nn.Module): - # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' - s = self.gain - x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) - x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) - - -class NMS(nn.Module): - # Non-Maximum Suppression (NMS) module - conf = 0.25 # confidence threshold - iou = 0.45 # IoU threshold - classes = None # (optional list) filter by class - - def __init__(self): - super(NMS, self).__init__() - - def forward(self, x): - return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) - - -class autoShape(nn.Module): - # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS - conf = 0.25 # NMS confidence threshold - iou = 0.45 # NMS IoU threshold - classes = None # (optional list) filter by class - - def __init__(self, model): - super(autoShape, self).__init__() - self.model = model.eval() - - def autoshape(self): - print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() - return self - - @torch.no_grad() - def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. 
For height=640, width=1280, RGB images example inputs are: - # filename: imgs = 'data/samples/zidane.jpg' - # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) - # numpy: = np.zeros((640,1280,3)) # HWC - # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) - # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images - - t = [time_synchronized()] - p = next(self.model.parameters()) # for device and type - if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(enabled=p.device.type != 'cpu'): - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images - shape0, shape1, files = [], [], [] # image and inference shapes, filenames - for i, im in enumerate(imgs): - f = f'image{i}' # filename - if isinstance(im, str): # filename or uri - im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im - elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = (size / max(s)) # gain - shape1.append([y * g for y in s]) - imgs[i] = im # update - shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad - x = np.stack(x, 0) if n > 1 else x[0][None] # stack - x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 - t.append(time_synchronized()) - - with amp.autocast(enabled=p.device.type != 'cpu'): - # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) - - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - - t.append(time_synchronized()) - return Detections(imgs, y, files, t, self.names, x.shape) - - -class Detections: - # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, times=None, names=None, shape=None): - super(Detections, self).__init__() - d = pred[0].device # device - gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays - self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) - self.names = names # class names - self.files = files # image filenames - self.xyxy = pred # xyxy pixels - self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels - self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized - self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) # number of images (batch size) - self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) - self.s = shape # inference BCHW shape - - def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): - colors = color_list() - for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' - if pred is not None: - for c in pred[:, -1].unique(): - n = (pred[:, -1] == c).sum() # detections per class - str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render: - for *box, conf, cls in pred: # xyxy, confidence, class - label = f'{self.names[int(cls)]} {conf:.2f}' - plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np - if pprint: - print(str.rstrip(', ')) - if show: - img.show(self.files[i]) # show - if save: - f = self.files[i] - img.save(Path(save_dir) / f) # save - print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') - if render: - self.imgs[i] = np.asarray(img) - - def print(self): - self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - - def show(self): - self.display(show=True) # show results - - def save(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir - Path(save_dir).mkdir(parents=True, exist_ok=True) - self.display(save=True, save_dir=save_dir) # save results - - def render(self): - self.display(render=True) # render results - return self.imgs - - def pandas(self): - # return detections as pandas DataFrames, i.e. 
print(results.pandas().xyxy[0]) - new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): - a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update - setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) - return new - - def tolist(self): - # return a list of Detections objects, i.e. 'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] - for d in x: - for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - setattr(d, k, getattr(d, k)[0]) # pop out of list - return x - - def __len__(self): - return self.n - - -class Classify(nn.Module): - # Classification head, i.e. x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super(Classify, self).__init__() - self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) - self.flat = nn.Flatten() - - def forward(self, x): - z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list - return self.flat(self.conv(z)) # flatten to x(b,c2) - -##### end of yolov5 ###### - - -##### orepa ##### - -def transI_fusebn(kernel, bn): - gamma = bn.weight - std = (bn.running_var + bn.eps).sqrt() - return kernel * ((gamma / std).reshape(-1, 1, 1, 1)), bn.bias - bn.running_mean * gamma / std - - -class ConvBN(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, - stride=1, padding=0, dilation=1, groups=1, deploy=False, nonlinear=None): - super().__init__() - if nonlinear is None: - self.nonlinear = nn.Identity() - else: - self.nonlinear = nonlinear - if deploy: - self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, - stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True) - else: - self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, - stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False) - self.bn = nn.BatchNorm2d(num_features=out_channels) - - def forward(self, x): - if hasattr(self, 'bn'): - return self.nonlinear(self.bn(self.conv(x))) - else: - return self.nonlinear(self.conv(x)) - - def switch_to_deploy(self): - kernel, bias = transI_fusebn(self.conv.weight, self.bn) - conv = nn.Conv2d(in_channels=self.conv.in_channels, out_channels=self.conv.out_channels, kernel_size=self.conv.kernel_size, - stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups, bias=True) - conv.weight.data = kernel - conv.bias.data = bias - for para in self.parameters(): - para.detach_() - self.__delattr__('conv') - self.__delattr__('bn') - self.conv = conv - -class OREPA_3x3_RepConv(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - stride=1, padding=0, dilation=1, groups=1, - internal_channels_1x1_3x3=None, - deploy=False, nonlinear=None, single_init=False): - super(OREPA_3x3_RepConv, self).__init__() - self.deploy = deploy - - if nonlinear is None: - self.nonlinear = nn.Identity() - else: - self.nonlinear = nonlinear - - self.kernel_size = kernel_size - self.in_channels = in_channels - self.out_channels = out_channels - 
self.groups = groups - assert padding == kernel_size // 2 - - self.stride = stride - self.padding = padding - self.dilation = dilation - - self.branch_counter = 0 - - self.weight_rbr_origin = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), kernel_size, kernel_size)) - nn.init.kaiming_uniform_(self.weight_rbr_origin, a=math.sqrt(1.0)) - self.branch_counter += 1 - - - if groups < out_channels: - self.weight_rbr_avg_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) - self.weight_rbr_pfir_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) - nn.init.kaiming_uniform_(self.weight_rbr_avg_conv, a=1.0) - nn.init.kaiming_uniform_(self.weight_rbr_pfir_conv, a=1.0) - self.weight_rbr_avg_conv.data - self.weight_rbr_pfir_conv.data - self.register_buffer('weight_rbr_avg_avg', torch.ones(kernel_size, kernel_size).mul(1.0/kernel_size/kernel_size)) - self.branch_counter += 1 - - else: - raise NotImplementedError - self.branch_counter += 1 - - if internal_channels_1x1_3x3 is None: - internal_channels_1x1_3x3 = in_channels if groups < out_channels else 2 * in_channels # For mobilenet, it is better to have 2X internal channels - - if internal_channels_1x1_3x3 == in_channels: - self.weight_rbr_1x1_kxk_idconv1 = nn.Parameter(torch.zeros(in_channels, int(in_channels/self.groups), 1, 1)) - id_value = np.zeros((in_channels, int(in_channels/self.groups), 1, 1)) - for i in range(in_channels): - id_value[i, i % int(in_channels/self.groups), 0, 0] = 1 - id_tensor = torch.from_numpy(id_value).type_as(self.weight_rbr_1x1_kxk_idconv1) - self.register_buffer('id_tensor', id_tensor) - - else: - self.weight_rbr_1x1_kxk_conv1 = nn.Parameter(torch.Tensor(internal_channels_1x1_3x3, int(in_channels/self.groups), 1, 1)) - nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv1, a=math.sqrt(1.0)) - self.weight_rbr_1x1_kxk_conv2 = nn.Parameter(torch.Tensor(out_channels, int(internal_channels_1x1_3x3/self.groups), kernel_size, kernel_size)) - nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv2, a=math.sqrt(1.0)) - self.branch_counter += 1 - - expand_ratio = 8 - self.weight_rbr_gconv_dw = nn.Parameter(torch.Tensor(in_channels*expand_ratio, 1, kernel_size, kernel_size)) - self.weight_rbr_gconv_pw = nn.Parameter(torch.Tensor(out_channels, in_channels*expand_ratio, 1, 1)) - nn.init.kaiming_uniform_(self.weight_rbr_gconv_dw, a=math.sqrt(1.0)) - nn.init.kaiming_uniform_(self.weight_rbr_gconv_pw, a=math.sqrt(1.0)) - self.branch_counter += 1 - - if out_channels == in_channels and stride == 1: - self.branch_counter += 1 - - self.vector = nn.Parameter(torch.Tensor(self.branch_counter, self.out_channels)) - self.bn = nn.BatchNorm2d(out_channels) - - self.fre_init() - - nn.init.constant_(self.vector[0, :], 0.25) #origin - nn.init.constant_(self.vector[1, :], 0.25) #avg - nn.init.constant_(self.vector[2, :], 0.0) #prior - nn.init.constant_(self.vector[3, :], 0.5) #1x1_kxk - nn.init.constant_(self.vector[4, :], 0.5) #dws_conv - - - def fre_init(self): - prior_tensor = torch.Tensor(self.out_channels, self.kernel_size, self.kernel_size) - half_fg = self.out_channels/2 - for i in range(self.out_channels): - for h in range(3): - for w in range(3): - if i < half_fg: - prior_tensor[i, h, w] = math.cos(math.pi*(h+0.5)*(i+1)/3) - else: - prior_tensor[i, h, w] = math.cos(math.pi*(w+0.5)*(i+1-half_fg)/3) - - self.register_buffer('weight_rbr_prior', prior_tensor) - - def weight_gen(self): - - weight_rbr_origin = torch.einsum('oihw,o->oihw', self.weight_rbr_origin, 
self.vector[0, :]) - - weight_rbr_avg = torch.einsum('oihw,o->oihw', torch.einsum('oihw,hw->oihw', self.weight_rbr_avg_conv, self.weight_rbr_avg_avg), self.vector[1, :]) - - weight_rbr_pfir = torch.einsum('oihw,o->oihw', torch.einsum('oihw,ohw->oihw', self.weight_rbr_pfir_conv, self.weight_rbr_prior), self.vector[2, :]) - - weight_rbr_1x1_kxk_conv1 = None - if hasattr(self, 'weight_rbr_1x1_kxk_idconv1'): - weight_rbr_1x1_kxk_conv1 = (self.weight_rbr_1x1_kxk_idconv1 + self.id_tensor).squeeze() - elif hasattr(self, 'weight_rbr_1x1_kxk_conv1'): - weight_rbr_1x1_kxk_conv1 = self.weight_rbr_1x1_kxk_conv1.squeeze() - else: - raise NotImplementedError - weight_rbr_1x1_kxk_conv2 = self.weight_rbr_1x1_kxk_conv2 - - if self.groups > 1: - g = self.groups - t, ig = weight_rbr_1x1_kxk_conv1.size() - o, tg, h, w = weight_rbr_1x1_kxk_conv2.size() - weight_rbr_1x1_kxk_conv1 = weight_rbr_1x1_kxk_conv1.view(g, int(t/g), ig) - weight_rbr_1x1_kxk_conv2 = weight_rbr_1x1_kxk_conv2.view(g, int(o/g), tg, h, w) - weight_rbr_1x1_kxk = torch.einsum('gti,gothw->goihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2).view(o, ig, h, w) - else: - weight_rbr_1x1_kxk = torch.einsum('ti,othw->oihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2) - - weight_rbr_1x1_kxk = torch.einsum('oihw,o->oihw', weight_rbr_1x1_kxk, self.vector[3, :]) - - weight_rbr_gconv = self.dwsc2full(self.weight_rbr_gconv_dw, self.weight_rbr_gconv_pw, self.in_channels) - weight_rbr_gconv = torch.einsum('oihw,o->oihw', weight_rbr_gconv, self.vector[4, :]) - - weight = weight_rbr_origin + weight_rbr_avg + weight_rbr_1x1_kxk + weight_rbr_pfir + weight_rbr_gconv - - return weight - - def dwsc2full(self, weight_dw, weight_pw, groups): - - t, ig, h, w = weight_dw.size() - o, _, _, _ = weight_pw.size() - tg = int(t/groups) - i = int(ig*groups) - weight_dw = weight_dw.view(groups, tg, ig, h, w) - weight_pw = weight_pw.squeeze().view(o, groups, tg) - - weight_dsc = torch.einsum('gtihw,ogt->ogihw', weight_dw, weight_pw) - return weight_dsc.view(o, i, h, w) - - def forward(self, inputs): - weight = self.weight_gen() - out = F.conv2d(inputs, weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) - - return self.nonlinear(self.bn(out)) - -class RepConv_OREPA(nn.Module): - - def __init__(self, c1, c2, k=3, s=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, nonlinear=nn.SiLU()): - super(RepConv_OREPA, self).__init__() - self.deploy = deploy - self.groups = groups - self.in_channels = c1 - self.out_channels = c2 - - self.padding = padding - self.dilation = dilation - self.groups = groups - - assert k == 3 - assert padding == 1 - - padding_11 = padding - k // 2 - - if nonlinear is None: - self.nonlinearity = nn.Identity() - else: - self.nonlinearity = nonlinear - - if use_se: - self.se = SEBlock(self.out_channels, internal_neurons=self.out_channels // 16) - else: - self.se = nn.Identity() - - if deploy: - self.rbr_reparam = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, - padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode) - - else: - self.rbr_identity = nn.BatchNorm2d(num_features=self.in_channels) if self.out_channels == self.in_channels and s == 1 else None - self.rbr_dense = OREPA_3x3_RepConv(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, padding=padding, groups=groups, dilation=1) - self.rbr_1x1 = ConvBN(in_channels=self.in_channels, 
out_channels=self.out_channels, kernel_size=1, stride=s, padding=padding_11, groups=groups, dilation=1) - print('RepVGG Block, identity = ', self.rbr_identity) - - - def forward(self, inputs): - if hasattr(self, 'rbr_reparam'): - return self.nonlinearity(self.se(self.rbr_reparam(inputs))) - - if self.rbr_identity is None: - id_out = 0 - else: - id_out = self.rbr_identity(inputs) - - out1 = self.rbr_dense(inputs) - out2 = self.rbr_1x1(inputs) - out3 = id_out - out = out1 + out2 + out3 - - return self.nonlinearity(self.se(out)) - - - # Optional. This improves the accuracy and facilitates quantization. - # 1. Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight. - # 2. Use like this. - # loss = criterion(....) - # for every RepVGGBlock blk: - # loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2() - # optimizer.zero_grad() - # loss.backward() - - # Not used for OREPA - def get_custom_L2(self): - K3 = self.rbr_dense.weight_gen() - K1 = self.rbr_1x1.conv.weight - t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() - t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() - - l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum() # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them. - eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1 # The equivalent resultant central point of 3x3 kernel. - l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum() # Normalize for an L2 coefficient comparable to regular L2. - return l2_loss_eq_kernel + l2_loss_circle - - def get_equivalent_kernel_bias(self): - kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) - kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) - kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) - return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid - - def _pad_1x1_to_3x3_tensor(self, kernel1x1): - if kernel1x1 is None: - return 0 - else: - return torch.nn.functional.pad(kernel1x1, [1,1,1,1]) - - def _fuse_bn_tensor(self, branch): - if branch is None: - return 0, 0 - if not isinstance(branch, nn.BatchNorm2d): - if isinstance(branch, OREPA_3x3_RepConv): - kernel = branch.weight_gen() - elif isinstance(branch, ConvBN): - kernel = branch.conv.weight - else: - raise NotImplementedError - running_mean = branch.bn.running_mean - running_var = branch.bn.running_var - gamma = branch.bn.weight - beta = branch.bn.bias - eps = branch.bn.eps - else: - if not hasattr(self, 'id_tensor'): - input_dim = self.in_channels // self.groups - kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32) - for i in range(self.in_channels): - kernel_value[i, i % input_dim, 1, 1] = 1 - self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) - kernel = self.id_tensor - running_mean = branch.running_mean - running_var = branch.running_var - gamma = branch.weight - beta = branch.bias - eps = branch.eps - std = (running_var + eps).sqrt() - t = (gamma / std).reshape(-1, 1, 1, 1) - return kernel * t, beta - running_mean * gamma / std - - def switch_to_deploy(self): - if hasattr(self, 'rbr_reparam'): - return - print(f"RepConv_OREPA.switch_to_deploy") - kernel, bias = self.get_equivalent_kernel_bias() - self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.in_channels, out_channels=self.rbr_dense.out_channels, - kernel_size=self.rbr_dense.kernel_size, 
stride=self.rbr_dense.stride, - padding=self.rbr_dense.padding, dilation=self.rbr_dense.dilation, groups=self.rbr_dense.groups, bias=True) - self.rbr_reparam.weight.data = kernel - self.rbr_reparam.bias.data = bias - for para in self.parameters(): - para.detach_() - self.__delattr__('rbr_dense') - self.__delattr__('rbr_1x1') - if hasattr(self, 'rbr_identity'): - self.__delattr__('rbr_identity') - -##### end of orepa ##### - - -##### swin transformer ##### - -class WindowAttention(nn.Module): - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - nn.init.normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - # print(attn.dtype, v.dtype) - try: - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - except: - #print(attn.dtype, v.dtype) - x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - -class Mlp(nn.Module): - - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = 
nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -def window_partition(x, window_size): - - B, H, W, C = x.shape - assert H % window_size == 0, 'feature map h and w can not divide by window size' - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - -def window_reverse(windows, window_size, H, W): - - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SwinTransformerLayer(nn.Module): - - def __init__(self, dim, num_heads, window_size=8, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.SiLU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - # if min(self.input_resolution) <= self.window_size: - # # if window size is larger than input resolution, we don't partition windows - # self.shift_size = 0 - # self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def create_mask(self, H, W): - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x): - # reshape x[b c h w] to x[b l c] - _, _, H_, W_ = x.shape - - Padding = False - if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: - Padding = True - # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') - pad_r = (self.window_size - W_ % self.window_size) % self.window_size - pad_b = (self.window_size - H_ % self.window_size) % self.window_size - x = F.pad(x, (0, pad_r, 0, pad_b)) - - # print('2', x.shape) - B, C, H, W = x.shape - L = H * W - x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c - - # create mask from init to forward - if self.shift_size > 0: - attn_mask = self.create_mask(H, W).to(x.device) - else: - attn_mask = None - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w - - if Padding: - x = x[:, :, :H_, :W_] # reverse padding - - return x - - -class SwinTransformerBlock(nn.Module): - def __init__(self, c1, c2, num_heads, num_layers, window_size=8): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - - # remove input_resolution - self.blocks = nn.Sequential(*[SwinTransformerLayer(dim=c2, num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - x = self.blocks(x) - return x - - -class STCSPA(nn.Module): - # CSP Bottleneck 
https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(STCSPA, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformerBlock(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.m(self.cv1(x)) - y2 = self.cv2(x) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class STCSPB(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(STCSPB, self).__init__() - c_ = int(c2) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformerBlock(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - x1 = self.cv1(x) - y1 = self.m(x1) - y2 = self.cv2(x1) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class STCSPC(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(STCSPC, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 1, 1) - self.cv4 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformerBlock(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(torch.cat((y1, y2), dim=1)) - -##### end of swin transformer ##### - - -##### swin transformer v2 ##### - -class WindowAttention_v2(nn.Module): - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., - pretrained_window_size=[0, 0]): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.pretrained_window_size = pretrained_window_size - self.num_heads = num_heads - - self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True) - - # mlp to generate continuous relative position bias - self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), - nn.ReLU(inplace=True), - nn.Linear(512, num_heads, bias=False)) - - # get relative_coords_table - relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) - relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) - relative_coords_table = torch.stack( - torch.meshgrid([relative_coords_h, - relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 - if pretrained_window_size[0] > 0: - relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) - relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) - else: - relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) - relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) - relative_coords_table *= 8 # normalize to -8, 8 - relative_coords_table = torch.sign(relative_coords_table) * 
torch.log2( - torch.abs(relative_coords_table) + 1.0) / np.log2(8) - - self.register_buffer("relative_coords_table", relative_coords_table) - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=False) - if qkv_bias: - self.q_bias = nn.Parameter(torch.zeros(dim)) - self.v_bias = nn.Parameter(torch.zeros(dim)) - else: - self.q_bias = None - self.v_bias = None - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - - B_, N, C = x.shape - qkv_bias = None - if self.q_bias is not None: - qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) - qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) - qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - # cosine attention - attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) - logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. 
/ 0.01))).exp() - attn = attn * logit_scale - - relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) - relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - relative_position_bias = 16 * torch.sigmoid(relative_position_bias) - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - try: - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - except: - x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) - - x = self.proj(x) - x = self.proj_drop(x) - return x - - def extra_repr(self) -> str: - return f'dim={self.dim}, window_size={self.window_size}, ' \ - f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}' - - def flops(self, N): - # calculate flops for 1 window with token length of N - flops = 0 - # qkv = self.qkv(x) - flops += N * self.dim * 3 * self.dim - # attn = (q @ k.transpose(-2, -1)) - flops += self.num_heads * N * (self.dim // self.num_heads) * N - # x = (attn @ v) - flops += self.num_heads * N * N * (self.dim // self.num_heads) - # x = self.proj(x) - flops += N * self.dim * self.dim - return flops - -class Mlp_v2(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition_v2(x, window_size): - - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse_v2(windows, window_size, H, W): - - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SwinTransformerLayer_v2(nn.Module): - - def __init__(self, dim, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.SiLU, norm_layer=nn.LayerNorm, pretrained_window_size=0): - super().__init__() - self.dim = dim - #self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - #if min(self.input_resolution) <= self.window_size: - # # if window size is larger than input resolution, we don't partition windows - # self.shift_size = 0 - # self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention_v2( - dim, 
window_size=(self.window_size, self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, - pretrained_window_size=(pretrained_window_size, pretrained_window_size)) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp_v2(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def create_mask(self, H, W): - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x): - # reshape x[b c h w] to x[b l c] - _, _, H_, W_ = x.shape - - Padding = False - if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: - Padding = True - # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') - pad_r = (self.window_size - W_ % self.window_size) % self.window_size - pad_b = (self.window_size - H_ % self.window_size) % self.window_size - x = F.pad(x, (0, pad_r, 0, pad_b)) - - # print('2', x.shape) - B, C, H, W = x.shape - L = H * W - x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c - - # create mask from init to forward - if self.shift_size > 0: - attn_mask = self.create_mask(H, W).to(x.device) - else: - attn_mask = None - - shortcut = x - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition_v2(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse_v2(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - x = shortcut + self.drop_path(self.norm1(x)) - - # FFN - x = x + self.drop_path(self.norm2(self.mlp(x))) - x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w - - if Padding: - x = x[:, :, :H_, :W_] # reverse padding - - return x - - def extra_repr(self) -> str: - return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ - f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" - - def flops(self): - flops = 0 - H, W = self.input_resolution - # norm1 - flops += self.dim * H * W - # W-MSA/SW-MSA - 
nW = H * W / self.window_size / self.window_size - flops += nW * self.attn.flops(self.window_size * self.window_size) - # mlp - flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio - # norm2 - flops += self.dim * H * W - return flops - - -class SwinTransformer2Block(nn.Module): - def __init__(self, c1, c2, num_heads, num_layers, window_size=7): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - - # remove input_resolution - self.blocks = nn.Sequential(*[SwinTransformerLayer_v2(dim=c2, num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - x = self.blocks(x) - return x - - -class ST2CSPA(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(ST2CSPA, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformer2Block(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.m(self.cv1(x)) - y2 = self.cv2(x) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class ST2CSPB(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(ST2CSPB, self).__init__() - c_ = int(c2) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformer2Block(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - x1 = self.cv1(x) - y1 = self.m(x1) - y2 = self.cv2(x1) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class ST2CSPC(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(ST2CSPC, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 1, 1) - self.cv4 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformer2Block(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(torch.cat((y1, y2), dim=1)) - -##### end of swin transformer v2 ##### diff --git a/spaces/SharkGaming/VisualAI/README.md b/spaces/SharkGaming/VisualAI/README.md deleted file mode 100644 index c9d539975fa932109f14e7917ae72446842280ca..0000000000000000000000000000000000000000 --- a/spaces/SharkGaming/VisualAI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: VisualAI -emoji: 👀 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Shivraj8615/Huggy/index.html b/spaces/Shivraj8615/Huggy/index.html deleted file mode 100644 index 
65c99948b644b1dc5d3945994c1b968d16e4e046..0000000000000000000000000000000000000000 --- a/spaces/Shivraj8615/Huggy/index.html +++ /dev/null @@ -1,133 +0,0 @@ - Huggy
diff --git a/spaces/SuYuanS/AudioCraft_Plus/model_cards/AUDIOGEN_MODEL_CARD.md b/spaces/SuYuanS/AudioCraft_Plus/model_cards/AUDIOGEN_MODEL_CARD.md deleted file mode 100644 index 92decf5e16e05ce0c2e72af8aa6728b5186c6882..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/model_cards/AUDIOGEN_MODEL_CARD.md +++ /dev/null @@ -1,79 +0,0 @@ -# AudioGen Model Card - -## Model details -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** This version of AudioGen was trained between July 2023 and August 2023. - -**Model version:** This is version 2 of the model, not to be confused with the original AudioGen model published in ["AudioGen: Textually Guided Audio Generation"][audiogen]. -In this version (v2), AudioGen was trained on the same data, but with some other differences: -1. This model was trained on 10 seconds (vs. 5 seconds in v1). -2. The discrete representation used under the hood is extracted using a retrained EnCodec model on the environmental sound data, following the EnCodec setup detailed in the ["Simple and Controllable Music Generation" paper][musicgen]. -3. No audio mixing augmentations. - -**Model type:** AudioGen consists of an EnCodec model for audio tokenization, and an auto-regressive language model based on the transformer architecture for audio modeling. The released model has 1.5B parameters. - -**Paper or resource for more information:** More information can be found in the paper [AudioGen: Textually Guided Audio Generation](https://arxiv.org/abs/2209.15352). - -**Citation details:** See [AudioGen paper][audiogen] - -**License:** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about AudioGen can be sent via the [GitHub repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. - -## Intended use -**Primary intended use:** The primary use of AudioGen is research on AI-based audio generation, including: -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of sound guided by text to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateurs seeking to better understand those models. - -**Out-of-scope use cases:** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate audio pieces that create hostile or alienating environments for people. This includes generating audio that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
- -## Metrics - -**Model performance measures:** We used the following objective measures to evaluate the model on a standard audio benchmark: -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) - -Additionally, we ran qualitative studies with human participants, evaluating the performance of the model along the following axes: -- Overall quality of the audio samples; -- Text relevance to the provided text input; - -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [AudioCaps benchmark](https://audiocaps.github.io/). - -## Training datasets - -The model was trained on the following data sources: a subset of AudioSet (Gemmeke et al., 2017), [BBC sound effects](https://sound-effects.bbcrewind.co.uk/), AudioCaps (Kim et al., 2019), Clotho v2 (Drossos et al., 2020), VGG-Sound (Chen et al., 2020), FSD50K (Fonseca et al., 2021), [Free To Use Sounds](https://www.freetousesounds.com/all-in-one-bundle/), [Sonniss Game Effects](https://sonniss.com/gameaudiogdc), [WeSoundEffects](https://wesoundeffects.com/we-sound-effects-bundle-2020/), [Paramount Motion - Odeon Cinematic Sound Effects](https://www.paramountmotion.com/odeon-sound-effects). - -## Evaluation results - -Below are the objective metrics obtained with the released model on AudioCaps (consisting of 10-second long samples). Note that the model differs from the original AudioGen model introduced in the paper, hence the difference in the metrics. - -| Model | Frechet Audio Distance | KLD | Text consistency | -|---|---|---|---| -| facebook/audiogen-medium | 1.77 | 1.41 | 0.299 | - -More information can be found in the paper [AudioGen: Textually Guided Audio Generation][audiogen], in the Experiments section. - -## Limitations and biases - -**Limitations:** -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The datasets used for training may lack diversity and are not representative of all possible sound events. The generated samples from the model will reflect the biases from the training data. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will make it possible to broaden the application to new and more representative data. - -**Use cases:** Users must be aware of the biases, limitations and risks of the model. AudioGen is a model developed for artificial intelligence research on audio generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks.
- -[musicgen]: https://arxiv.org/abs/2306.05284 -[audiogen]: https://arxiv.org/abs/2209.15352 diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevconsole.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevconsole.py deleted file mode 100644 index 6b1378887c600e57f14b905cba8861c96ed62bab..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevconsole.py +++ /dev/null @@ -1,603 +0,0 @@ -''' -Entry point module to start the interactive console. -''' -from _pydev_bundle._pydev_saved_modules import thread, _code -from _pydevd_bundle.pydevd_constants import IS_JYTHON -start_new_thread = thread.start_new_thread - -from _pydevd_bundle.pydevconsole_code import InteractiveConsole - -compile_command = _code.compile_command -InteractiveInterpreter = _code.InteractiveInterpreter - -import os -import sys - -from _pydev_bundle._pydev_saved_modules import threading -from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE - -import traceback -from _pydev_bundle import pydev_log - -from _pydevd_bundle import pydevd_save_locals - -from _pydev_bundle.pydev_imports import Exec, _queue - -import builtins as __builtin__ - -from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn # @UnusedImport -from _pydev_bundle.pydev_console_utils import CodeFragment - - -class Command: - - def __init__(self, interpreter, code_fragment): - """ - :type code_fragment: CodeFragment - :type interpreter: InteractiveConsole - """ - self.interpreter = interpreter - self.code_fragment = code_fragment - self.more = None - - def symbol_for_fragment(code_fragment): - if code_fragment.is_single_line: - symbol = 'single' - else: - if IS_JYTHON: - symbol = 'single' # Jython doesn't support exec - else: - symbol = 'exec' - return symbol - - symbol_for_fragment = staticmethod(symbol_for_fragment) - - def run(self): - text = self.code_fragment.text - symbol = self.symbol_for_fragment(self.code_fragment) - - self.more = self.interpreter.runsource(text, '', symbol) - - -try: - from _pydev_bundle.pydev_imports import execfile - - __builtin__.execfile = execfile -except: - pass - -# Pull in runfile, the interface to UMD that wraps execfile -from _pydev_bundle.pydev_umd import runfile, _set_globals_function -if sys.version_info[0] >= 3: - __builtin__.runfile = runfile -else: - __builtin__.runfile = runfile - - -#======================================================================================================================= -# InterpreterInterface -#======================================================================================================================= -class InterpreterInterface(BaseInterpreterInterface): - ''' - The methods in this class should be registered in the xml-rpc server. 
- ''' - - def __init__(self, host, client_port, mainThread, connect_status_queue=None): - BaseInterpreterInterface.__init__(self, mainThread, connect_status_queue) - self.client_port = client_port - self.host = host - self.namespace = {} - self.interpreter = InteractiveConsole(self.namespace) - self._input_error_printed = False - - def do_add_exec(self, codeFragment): - command = Command(self.interpreter, codeFragment) - command.run() - return command.more - - def get_namespace(self): - return self.namespace - - def getCompletions(self, text, act_tok): - try: - from _pydev_bundle._pydev_completer import Completer - - completer = Completer(self.namespace, None) - return completer.complete(act_tok) - except: - pydev_log.exception() - return [] - - def close(self): - sys.exit(0) - - def get_greeting_msg(self): - return 'PyDev console: starting.\n' - - -class _ProcessExecQueueHelper: - _debug_hook = None - _return_control_osc = False - - -def set_debug_hook(debug_hook): - _ProcessExecQueueHelper._debug_hook = debug_hook - - -def activate_mpl_if_already_imported(interpreter): - if interpreter.mpl_modules_for_patching: - for module in list(interpreter.mpl_modules_for_patching): - if module in sys.modules: - activate_function = interpreter.mpl_modules_for_patching.pop(module) - activate_function() - - -def init_set_return_control_back(interpreter): - from pydev_ipython.inputhook import set_return_control_callback - - def return_control(): - ''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find - out if they should cede control and return ''' - if _ProcessExecQueueHelper._debug_hook: - # Some of the input hooks check return control without doing - # a single operation, so we don't return True on every - # call when the debug hook is in place to allow the GUI to run - # XXX: Eventually the inputhook code will have diverged enough - # from the IPython source that it will be worthwhile rewriting - # it rather than pretending to maintain the old API - _ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc - if _ProcessExecQueueHelper._return_control_osc: - return True - - if not interpreter.exec_queue.empty(): - return True - return False - - set_return_control_callback(return_control) - - -def init_mpl_in_console(interpreter): - init_set_return_control_back(interpreter) - - if not INTERACTIVE_MODE_AVAILABLE: - return - - activate_mpl_if_already_imported(interpreter) - from _pydev_bundle.pydev_import_hook import import_hook_manager - for mod in list(interpreter.mpl_modules_for_patching): - import_hook_manager.add_module_name(mod, interpreter.mpl_modules_for_patching.pop(mod)) - - -if sys.platform != 'win32': - - if not hasattr(os, 'kill'): # Jython may not have it. - - def pid_exists(pid): - return True - - else: - - def pid_exists(pid): - # Note that this function in the face of errors will conservatively consider that - # the pid is still running (because we'll exit the current process when it's - # no longer running, so, we need to be 100% sure it actually exited). - - import errno - if pid == 0: - # According to "man 2 kill" PID 0 has a special meaning: - # it refers to <> so we don't want to go any further. - # If we get here it means this UNIX platform *does* have - # a process with id 0. 
- return True - try: - os.kill(pid, 0) - except OSError as err: - if err.errno == errno.ESRCH: - # ESRCH == No such process - return False - elif err.errno == errno.EPERM: - # EPERM clearly means there's a process to deny access to - return True - else: - # According to "man 2 kill" possible error values are - # (EINVAL, EPERM, ESRCH) therefore we should never get - # here. If we do, although it's an error, consider it - # exists (see first comment in this function). - return True - else: - return True - -else: - - def pid_exists(pid): - # Note that this function in the face of errors will conservatively consider that - # the pid is still running (because we'll exit the current process when it's - # no longer running, so, we need to be 100% sure it actually exited). - import ctypes - kernel32 = ctypes.windll.kernel32 - - PROCESS_QUERY_INFORMATION = 0x0400 - PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 - ERROR_INVALID_PARAMETER = 0x57 - STILL_ACTIVE = 259 - - process = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_QUERY_LIMITED_INFORMATION, 0, pid) - if not process: - err = kernel32.GetLastError() - if err == ERROR_INVALID_PARAMETER: - # Means it doesn't exist (pid parameter is wrong). - return False - - # There was some unexpected error (such as access denied), so - # consider it exists (although it could be something else, but we don't want - # to raise any errors -- so, just consider it exists). - return True - - try: - zero = ctypes.c_int(0) - exit_code = ctypes.pointer(zero) - - exit_code_suceeded = kernel32.GetExitCodeProcess(process, exit_code) - if not exit_code_suceeded: - # There was some unexpected error (such as access denied), so - # consider it exists (although it could be something else, but we don't want - # to raise any errors -- so, just consider it exists). - return True - - elif bool(exit_code.contents.value) and int(exit_code.contents.value) != STILL_ACTIVE: - return False - finally: - kernel32.CloseHandle(process) - - return True - - -def process_exec_queue(interpreter): - init_mpl_in_console(interpreter) - from pydev_ipython.inputhook import get_inputhook - try: - kill_if_pid_not_alive = int(os.environ.get('PYDEV_ECLIPSE_PID', '-1')) - except: - kill_if_pid_not_alive = -1 - - while 1: - if kill_if_pid_not_alive != -1: - if not pid_exists(kill_if_pid_not_alive): - exit() - - # Running the request may have changed the inputhook in use - inputhook = get_inputhook() - - if _ProcessExecQueueHelper._debug_hook: - _ProcessExecQueueHelper._debug_hook() - - if inputhook: - try: - # Note: it'll block here until return_control returns True. - inputhook() - except: - pydev_log.exception() - try: - try: - code_fragment = interpreter.exec_queue.get(block=True, timeout=1 / 20.) # 20 calls/second - except _queue.Empty: - continue - - if callable(code_fragment): - # It can be a callable (i.e.: something that must run in the main - # thread can be put in the queue for later execution). - code_fragment() - else: - more = interpreter.add_exec(code_fragment) - except KeyboardInterrupt: - interpreter.buffer = None - continue - except SystemExit: - raise - except: - pydev_log.exception('Error processing queue on pydevconsole.') - exit() - - -if 'IPYTHONENABLE' in os.environ: - IPYTHON = os.environ['IPYTHONENABLE'] == 'True' -else: - # By default, don't use IPython because occasionally changes - # in IPython break pydevd. 
- IPYTHON = False - -try: - try: - exitfunc = sys.exitfunc - except AttributeError: - exitfunc = None - - if IPYTHON: - from _pydev_bundle.pydev_ipython_console import InterpreterInterface - if exitfunc is not None: - sys.exitfunc = exitfunc - else: - try: - delattr(sys, 'exitfunc') - except: - pass -except: - IPYTHON = False - pass - - -#======================================================================================================================= -# _DoExit -#======================================================================================================================= -def do_exit(*args): - ''' - We have to override the exit because calling sys.exit will only actually exit the main thread, - and as we're in a Xml-rpc server, that won't work. - ''' - - try: - import java.lang.System - - java.lang.System.exit(1) - except ImportError: - if len(args) == 1: - os._exit(args[0]) - else: - os._exit(0) - - -#======================================================================================================================= -# start_console_server -#======================================================================================================================= -def start_console_server(host, port, interpreter): - try: - if port == 0: - host = '' - - # I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse. - from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer # @Reimport - - try: - server = XMLRPCServer((host, port), logRequests=False, allow_none=True) - - except: - sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port)) - sys.stderr.flush() - raise - - # Tell UMD the proper default namespace - _set_globals_function(interpreter.get_namespace) - - server.register_function(interpreter.execLine) - server.register_function(interpreter.execMultipleLines) - server.register_function(interpreter.getCompletions) - server.register_function(interpreter.getFrame) - server.register_function(interpreter.getVariable) - server.register_function(interpreter.changeVariable) - server.register_function(interpreter.getDescription) - server.register_function(interpreter.close) - server.register_function(interpreter.interrupt) - server.register_function(interpreter.handshake) - server.register_function(interpreter.connectToDebugger) - server.register_function(interpreter.hello) - server.register_function(interpreter.getArray) - server.register_function(interpreter.evaluate) - server.register_function(interpreter.ShowConsole) - server.register_function(interpreter.loadFullValue) - - # Functions for GUI main loop integration - server.register_function(interpreter.enableGui) - - if port == 0: - (h, port) = server.socket.getsockname() - - print(port) - print(interpreter.client_port) - - while True: - try: - server.serve_forever() - except: - # Ugly code to be py2/3 compatible - # https://sw-brainwy.rhcloud.com/tracker/PyDev/534: - # Unhandled "interrupted system call" error in the pydevconsol.py - e = sys.exc_info()[1] - retry = False - try: - retry = e.args[0] == 4 # errno.EINTR - except: - pass - if not retry: - raise - # Otherwise, keep on going - return server - except: - pydev_log.exception() - # Notify about error to avoid long waiting - connection_queue = interpreter.get_connect_status_queue() - if connection_queue is not None: - connection_queue.put(False) - - -def start_server(host, port, client_port): - # replace exit (see comments on 
method) - # note that this does not work in jython!!! (sys method can't be replaced). - sys.exit = do_exit - - interpreter = InterpreterInterface(host, client_port, threading.current_thread()) - - start_new_thread(start_console_server, (host, port, interpreter)) - - process_exec_queue(interpreter) - - -def get_ipython_hidden_vars(): - if IPYTHON and hasattr(__builtin__, 'interpreter'): - interpreter = get_interpreter() - return interpreter.get_ipython_hidden_vars_dict() - - -def get_interpreter(): - try: - interpreterInterface = getattr(__builtin__, 'interpreter') - except AttributeError: - interpreterInterface = InterpreterInterface(None, None, threading.current_thread()) - __builtin__.interpreter = interpreterInterface - sys.stderr.write(interpreterInterface.get_greeting_msg()) - sys.stderr.flush() - - return interpreterInterface - - -def get_completions(text, token, globals, locals): - interpreterInterface = get_interpreter() - - interpreterInterface.interpreter.update(globals, locals) - - return interpreterInterface.getCompletions(text, token) - -#=============================================================================== -# Debugger integration -#=============================================================================== - - -def exec_code(code, globals, locals, debugger): - interpreterInterface = get_interpreter() - interpreterInterface.interpreter.update(globals, locals) - - res = interpreterInterface.need_more(code) - - if res: - return True - - interpreterInterface.add_exec(code, debugger) - - return False - - -class ConsoleWriter(InteractiveInterpreter): - skip = 0 - - def __init__(self, locals=None): - InteractiveInterpreter.__init__(self, locals) - - def write(self, data): - # if (data.find("global_vars") == -1 and data.find("pydevd") == -1): - if self.skip > 0: - self.skip -= 1 - else: - if data == "Traceback (most recent call last):\n": - self.skip = 1 - sys.stderr.write(data) - - def showsyntaxerror(self, filename=None): - """Display the syntax error that just occurred.""" - # Override for avoid using sys.excepthook PY-12600 - type, value, tb = sys.exc_info() - sys.last_type = type - sys.last_value = value - sys.last_traceback = tb - if filename and type is SyntaxError: - # Work hard to stuff the correct filename in the exception - try: - msg, (dummy_filename, lineno, offset, line) = value.args - except ValueError: - # Not the format we expect; leave it alone - pass - else: - # Stuff in the right filename - value = SyntaxError(msg, (filename, lineno, offset, line)) - sys.last_value = value - list = traceback.format_exception_only(type, value) - sys.stderr.write(''.join(list)) - - def showtraceback(self, *args, **kwargs): - """Display the exception that just occurred.""" - # Override for avoid using sys.excepthook PY-12600 - try: - type, value, tb = sys.exc_info() - sys.last_type = type - sys.last_value = value - sys.last_traceback = tb - tblist = traceback.extract_tb(tb) - del tblist[:1] - lines = traceback.format_list(tblist) - if lines: - lines.insert(0, "Traceback (most recent call last):\n") - lines.extend(traceback.format_exception_only(type, value)) - finally: - tblist = tb = None - sys.stderr.write(''.join(lines)) - - -def console_exec(thread_id, frame_id, expression, dbg): - """returns 'False' in case expression is partially correct - """ - frame = dbg.find_frame(thread_id, frame_id) - - is_multiline = expression.count('@LINE@') > 1 - expression = str(expression.replace('@LINE@', '\n')) - - # Not using frame.f_globals because of 
https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 - # (Names not resolved in generator expression in method) - # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html - updated_globals = {} - updated_globals.update(frame.f_globals) - updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals - - if IPYTHON: - need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg) - if not need_more: - pydevd_save_locals.save_locals(frame) - return need_more - - interpreter = ConsoleWriter() - - if not is_multiline: - try: - code = compile_command(expression) - except (OverflowError, SyntaxError, ValueError): - # Case 1 - interpreter.showsyntaxerror() - return False - if code is None: - # Case 2 - return True - else: - code = expression - - # Case 3 - - try: - Exec(code, updated_globals, frame.f_locals) - - except SystemExit: - raise - except: - interpreter.showtraceback() - else: - pydevd_save_locals.save_locals(frame) - return False - - -#======================================================================================================================= -# main -#======================================================================================================================= -if __name__ == '__main__': - # Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole - # so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple - # representations of its classes). - # See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446: - # 'Variables' and 'Expressions' views stopped working when debugging interactive console - import pydevconsole - sys.stdin = pydevconsole.BaseStdIn(sys.stdin) - port, client_port = sys.argv[1:3] - from _pydev_bundle import pydev_localhost - - if int(port) == 0 and int(client_port) == 0: - (h, p) = pydev_localhost.get_socket_name() - - client_port = p - - pydevconsole.start_server(pydev_localhost.get_localhost(), int(port), int(client_port)) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/datasets/pascal_context.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/datasets/pascal_context.py deleted file mode 100644 index 541a63c66a13fb16fd52921e755715ad8d078fdd..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/datasets/pascal_context.py +++ /dev/null @@ -1,103 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class PascalContextDataset(CustomDataset): - """PascalContext dataset. - - In segmentation map annotation for PascalContext, 0 stands for background, - which is included in 60 categories. ``reduce_zero_label`` is fixed to - False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is - fixed to '.png'. - - Args: - split (str): Split txt file for PascalContext. 
- """ - - CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench', - 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', - 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', - 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', - 'floor', 'flower', 'food', 'grass', 'ground', 'horse', - 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', - 'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep', - 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', - 'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water', - 'window', 'wood') - - PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]] - - def __init__(self, split, **kwargs): - super(PascalContextDataset, self).__init__( - img_suffix='.jpg', - seg_map_suffix='.png', - split=split, - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) and self.split is not None - - -@DATASETS.register_module() -class PascalContextDataset59(CustomDataset): - """PascalContext dataset. - - In segmentation map annotation for PascalContext, 0 stands for background, - which is included in 60 categories. ``reduce_zero_label`` is fixed to - False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is - fixed to '.png'. - - Args: - split (str): Split txt file for PascalContext. 
- """ - - CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', - 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', - 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', - 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', - 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', - 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', - 'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', - 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', - 'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood') - - PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], - [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], - [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], - [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], - [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], - [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], - [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], - [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], - [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], - [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], - [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], - [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], - [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], - [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], - [0, 235, 255], [0, 173, 255], [31, 0, 255]] - - def __init__(self, split, **kwargs): - super(PascalContextDataset59, self).__init__( - img_suffix='.jpg', - seg_map_suffix='.png', - split=split, - reduce_zero_label=True, - **kwargs) - assert osp.exists(self.img_dir) and self.split is not None diff --git a/spaces/Sybghat/resume-parser/README.md b/spaces/Sybghat/resume-parser/README.md deleted file mode 100644 index 980bc13b9e11a02e5352541f32f9cf57caa0e78b..0000000000000000000000000000000000000000 --- a/spaces/Sybghat/resume-parser/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Resume Parser -emoji: 🏢 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/TandCAcceptMe/face-swap-docker/plugins/plugin_faceswap.py b/spaces/TandCAcceptMe/face-swap-docker/plugins/plugin_faceswap.py deleted file mode 100644 index 5b63e6fac6ecb1c4868fc2e076bbda224f0a434c..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/plugins/plugin_faceswap.py +++ /dev/null @@ -1,86 +0,0 @@ -from chain_img_processor import ChainImgProcessor, ChainImgPlugin -from roop.face_helper import get_one_face, get_many_faces, swap_face -import os -from roop.utilities import compute_cosine_distance - -modname = os.path.basename(__file__)[:-3] # calculating modname - -# start function -def start(core:ChainImgProcessor): - manifest = { # plugin settings - "name": "Faceswap", # name - "version": "1.0", # version - - "default_options": { - "swap_mode": "selected", - "max_distance": 0.65, # max distance to detect face similarity - }, - "img_processor": { - "faceswap": Faceswap - } - } - return manifest - -def start_with_options(core:ChainImgProcessor, manifest:dict): - pass - - -class Faceswap(ChainImgPlugin): - - def init_plugin(self): - pass - - - def process(self, frame, params:dict): - if not "input_face_datas" in params or len(params["input_face_datas"]) < 1: - 
params["face_detected"] = False - return frame - - temp_frame = frame - params["face_detected"] = True - params["processed_faces"] = [] - - if params["swap_mode"] == "first": - face = get_one_face(frame) - if face is None: - params["face_detected"] = False - return frame - params["processed_faces"].append(face) - frame = swap_face(params["input_face_datas"][0], face, frame) - return frame - - else: - faces = get_many_faces(frame) - if(len(faces) < 1): - params["face_detected"] = False - return frame - - dist_threshold = params["face_distance_threshold"] - - if params["swap_mode"] == "all": - for sf in params["input_face_datas"]: - for face in faces: - params["processed_faces"].append(face) - temp_frame = swap_face(sf, face, temp_frame) - return temp_frame - - elif params["swap_mode"] == "selected": - for i,tf in enumerate(params["target_face_datas"]): - for face in faces: - if compute_cosine_distance(tf.embedding, face.embedding) <= dist_threshold: - temp_frame = swap_face(params["input_face_datas"][i], face, temp_frame) - params["processed_faces"].append(face) - break - - elif params["swap_mode"] == "all_female" or params["swap_mode"] == "all_male": - gender = 'F' if params["swap_mode"] == "all_female" else 'M' - face_found = False - for face in faces: - if face.sex == gender: - face_found = True - if face_found: - params["processed_faces"].append(face) - temp_frame = swap_face(params["input_face_datas"][0], face, temp_frame) - face_found = False - - return temp_frame diff --git a/spaces/TangibleAI/mathtext/api_scaling.sh b/spaces/TangibleAI/mathtext/api_scaling.sh deleted file mode 100644 index ed46f98b182fbca454c1292d0e4c175c977d335b..0000000000000000000000000000000000000000 --- a/spaces/TangibleAI/mathtext/api_scaling.sh +++ /dev/null @@ -1,83 +0,0 @@ -#! /bin/env bash - -LOG_FILE_NAME="call_history_bash.csv" - -if [[ ! -f "$LOG_FILE_NAME" ]]; then - # Creation of column names if the file does not exits - echo "student_id;active_students;endpoint;inputs;outputs;started;finished" >$LOG_FILE_NAME -fi - -data_list_1() { - responses=( - "one hundred forty five" - "twenty thousand nine hundred fifty" - "one hundred forty five" - "nine hundred eighty three" - "five million" - ) - echo "${responses[$1]}" -} - -data_list_2() { - responses=( - "Totally agree" - "I like it" - "No more" - "I am not sure" - "Never" - ) - echo "${responses[$1]}" -} - -# endpoints: "text2int" "sentiment-analysis" -# selected endpoint to test -endpoint="sentiment-analysis" - -create_random_delay() { - # creates a random delay for given arguments - echo "scale=8; $RANDOM/32768*$1" | bc -} - -simulate_student() { - # Student simulator waits randomly between 0-10s after an interaction. 
- # Based on 100 interactions per student - for i in {1..100}; do - - random_value=$((RANDOM % 5)) - text=$(data_list_2 $random_value) - data='{"data": ["'$text'"]}' - - start_=$(date +"%F %T.%6N") - - url="https://tangibleai-mathtext.hf.space/run/$3" - response=$(curl --silent --connect-timeout 30 --max-time 30 -X POST "$url" -H 'Content-Type: application/json' -d "$data") - - if [[ "$response" == *"Time-out"* ]]; then - echo "$response" >>bad_response.txt - response="504 Gateway Time-out" - elif [[ -z "$response" ]]; then - echo "No response" >>bad_response.txt - response="504 Gateway Time-out" - fi - - end_=$(date +"%F %T.%6N") - - printf "%s;%s;%s;%s;%s;%s;%s\n" "$1" "$2" "$3" "$data" "$response" "$start_" "$end_" >>$LOG_FILE_NAME - sleep "$(create_random_delay 10)" - - done -} - -echo "start: $(date)" - -active_students=250 # the number of students using the system at the same time - -i=1 -while [[ "$i" -le "$active_students" ]]; do - simulate_student "student$i" "$active_students" "$endpoint" & - sleep "$(create_random_delay 1)" # adding a random delay between students - i=$(("$i" + 1)) -done - -wait -echo "end: $(date)" diff --git a/spaces/TrustSafeAI/NCTV/assets/css/bootstrap/bootstrap-utilities.css b/spaces/TrustSafeAI/NCTV/assets/css/bootstrap/bootstrap-utilities.css deleted file mode 100644 index c509cf53fb0e2764fce3a838480d58d6262dc477..0000000000000000000000000000000000000000 --- a/spaces/TrustSafeAI/NCTV/assets/css/bootstrap/bootstrap-utilities.css +++ /dev/null @@ -1,4866 +0,0 @@ -/*! - * Bootstrap Utilities v5.1.3 (https://getbootstrap.com/) - * Copyright 2011-2021 The Bootstrap Authors - * Copyright 2011-2021 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - */ -.clearfix::after { - display: block; - clear: both; - content: ""; -} - -.link-primary { - color: #0d6efd; -} -.link-primary:hover, .link-primary:focus { - color: #0a58ca; -} - -.link-secondary { - color: #6c757d; -} -.link-secondary:hover, .link-secondary:focus { - color: #565e64; -} - -.link-success { - color: #198754; -} -.link-success:hover, .link-success:focus { - color: #146c43; -} - -.link-info { - color: #0dcaf0; -} -.link-info:hover, .link-info:focus { - color: #3dd5f3; -} - -.link-warning { - color: #ffc107; -} -.link-warning:hover, .link-warning:focus { - color: #ffcd39; -} - -.link-danger { - color: #dc3545; -} -.link-danger:hover, .link-danger:focus { - color: #b02a37; -} - -.link-light { - color: #f8f9fa; -} -.link-light:hover, .link-light:focus { - color: #f9fafb; -} - -.link-dark { - color: #212529; -} -.link-dark:hover, .link-dark:focus { - color: #1a1e21; -} - -.ratio { - position: relative; - width: 100%; -} -.ratio::before { - display: block; - padding-top: var(--bs-aspect-ratio); - content: ""; -} -.ratio > * { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; -} - -.ratio-1x1 { - --bs-aspect-ratio: 100%; -} - -.ratio-4x3 { - --bs-aspect-ratio: 75%; -} - -.ratio-16x9 { - --bs-aspect-ratio: 56.25%; -} - -.ratio-21x9 { - --bs-aspect-ratio: 42.8571428571%; -} - -.fixed-top { - position: fixed; - top: 0; - right: 0; - left: 0; - z-index: 1030; -} - -.fixed-bottom { - position: fixed; - right: 0; - bottom: 0; - left: 0; - z-index: 1030; -} - -.sticky-top { - position: -webkit-sticky; - position: sticky; - top: 0; - z-index: 1020; -} - -@media (min-width: 576px) { - .sticky-sm-top { - position: -webkit-sticky; - position: sticky; - top: 0; - z-index: 1020; - } -} -@media (min-width: 768px) { - .sticky-md-top { - position: 
-webkit-sticky; - position: sticky; - top: 0; - z-index: 1020; - } -} -@media (min-width: 992px) { - .sticky-lg-top { - position: -webkit-sticky; - position: sticky; - top: 0; - z-index: 1020; - } -} -@media (min-width: 1200px) { - .sticky-xl-top { - position: -webkit-sticky; - position: sticky; - top: 0; - z-index: 1020; - } -} -@media (min-width: 1400px) { - .sticky-xxl-top { - position: -webkit-sticky; - position: sticky; - top: 0; - z-index: 1020; - } -} -.hstack { - display: flex; - flex-direction: row; - align-items: center; - align-self: stretch; -} - -.vstack { - display: flex; - flex: 1 1 auto; - flex-direction: column; - align-self: stretch; -} - -.visually-hidden, -.visually-hidden-focusable:not(:focus):not(:focus-within) { - position: absolute !important; - width: 1px !important; - height: 1px !important; - padding: 0 !important; - margin: -1px !important; - overflow: hidden !important; - clip: rect(0, 0, 0, 0) !important; - white-space: nowrap !important; - border: 0 !important; -} - -.stretched-link::after { - position: absolute; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1; - content: ""; -} - -.text-truncate { - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.vr { - display: inline-block; - align-self: stretch; - width: 1px; - min-height: 1em; - background-color: currentColor; - opacity: 0.25; -} - -.align-baseline { - vertical-align: baseline !important; -} - -.align-top { - vertical-align: top !important; -} - -.align-middle { - vertical-align: middle !important; -} - -.align-bottom { - vertical-align: bottom !important; -} - -.align-text-bottom { - vertical-align: text-bottom !important; -} - -.align-text-top { - vertical-align: text-top !important; -} - -.float-start { - float: left !important; -} - -.float-end { - float: right !important; -} - -.float-none { - float: none !important; -} - -.opacity-0 { - opacity: 0 !important; -} - -.opacity-25 { - opacity: 0.25 !important; -} - -.opacity-50 { - opacity: 0.5 !important; -} - -.opacity-75 { - opacity: 0.75 !important; -} - -.opacity-100 { - opacity: 1 !important; -} - -.overflow-auto { - overflow: auto !important; -} - -.overflow-hidden { - overflow: hidden !important; -} - -.overflow-visible { - overflow: visible !important; -} - -.overflow-scroll { - overflow: scroll !important; -} - -.d-inline { - display: inline !important; -} - -.d-inline-block { - display: inline-block !important; -} - -.d-block { - display: block !important; -} - -.d-grid { - display: grid !important; -} - -.d-table { - display: table !important; -} - -.d-table-row { - display: table-row !important; -} - -.d-table-cell { - display: table-cell !important; -} - -.d-flex { - display: flex !important; -} - -.d-inline-flex { - display: inline-flex !important; -} - -.d-none { - display: none !important; -} - -.shadow { - box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15) !important; -} - -.shadow-sm { - box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075) !important; -} - -.shadow-lg { - box-shadow: 0 1rem 3rem rgba(0, 0, 0, 0.175) !important; -} - -.shadow-none { - box-shadow: none !important; -} - -.position-static { - position: static !important; -} - -.position-relative { - position: relative !important; -} - -.position-absolute { - position: absolute !important; -} - -.position-fixed { - position: fixed !important; -} - -.position-sticky { - position: -webkit-sticky !important; - position: sticky !important; -} - -.top-0 { - top: 0 !important; -} - -.top-50 { - top: 50% !important; -} - -.top-100 { - top: 100% 
!important; -} - -.bottom-0 { - bottom: 0 !important; -} - -.bottom-50 { - bottom: 50% !important; -} - -.bottom-100 { - bottom: 100% !important; -} - -.start-0 { - left: 0 !important; -} - -.start-50 { - left: 50% !important; -} - -.start-100 { - left: 100% !important; -} - -.end-0 { - right: 0 !important; -} - -.end-50 { - right: 50% !important; -} - -.end-100 { - right: 100% !important; -} - -.translate-middle { - transform: translate(-50%, -50%) !important; -} - -.translate-middle-x { - transform: translateX(-50%) !important; -} - -.translate-middle-y { - transform: translateY(-50%) !important; -} - -.border { - border: 1px solid #dee2e6 !important; -} - -.border-0 { - border: 0 !important; -} - -.border-top { - border-top: 1px solid #dee2e6 !important; -} - -.border-top-0 { - border-top: 0 !important; -} - -.border-end { - border-right: 1px solid #dee2e6 !important; -} - -.border-end-0 { - border-right: 0 !important; -} - -.border-bottom { - border-bottom: 1px solid #dee2e6 !important; -} - -.border-bottom-0 { - border-bottom: 0 !important; -} - -.border-start { - border-left: 1px solid #dee2e6 !important; -} - -.border-start-0 { - border-left: 0 !important; -} - -.border-primary { - border-color: #0d6efd !important; -} - -.border-secondary { - border-color: #6c757d !important; -} - -.border-success { - border-color: #198754 !important; -} - -.border-info { - border-color: #0dcaf0 !important; -} - -.border-warning { - border-color: #ffc107 !important; -} - -.border-danger { - border-color: #dc3545 !important; -} - -.border-light { - border-color: #f8f9fa !important; -} - -.border-dark { - border-color: #212529 !important; -} - -.border-white { - border-color: #fff !important; -} - -.border-1 { - border-width: 1px !important; -} - -.border-2 { - border-width: 2px !important; -} - -.border-3 { - border-width: 3px !important; -} - -.border-4 { - border-width: 4px !important; -} - -.border-5 { - border-width: 5px !important; -} - -.w-25 { - width: 25% !important; -} - -.w-50 { - width: 50% !important; -} - -.w-75 { - width: 75% !important; -} - -.w-100 { - width: 100% !important; -} - -.w-auto { - width: auto !important; -} - -.mw-100 { - max-width: 100% !important; -} - -.vw-100 { - width: 100vw !important; -} - -.min-vw-100 { - min-width: 100vw !important; -} - -.h-25 { - height: 25% !important; -} - -.h-50 { - height: 50% !important; -} - -.h-75 { - height: 75% !important; -} - -.h-100 { - height: 100% !important; -} - -.h-auto { - height: auto !important; -} - -.mh-100 { - max-height: 100% !important; -} - -.vh-100 { - height: 100vh !important; -} - -.min-vh-100 { - min-height: 100vh !important; -} - -.flex-fill { - flex: 1 1 auto !important; -} - -.flex-row { - flex-direction: row !important; -} - -.flex-column { - flex-direction: column !important; -} - -.flex-row-reverse { - flex-direction: row-reverse !important; -} - -.flex-column-reverse { - flex-direction: column-reverse !important; -} - -.flex-grow-0 { - flex-grow: 0 !important; -} - -.flex-grow-1 { - flex-grow: 1 !important; -} - -.flex-shrink-0 { - flex-shrink: 0 !important; -} - -.flex-shrink-1 { - flex-shrink: 1 !important; -} - -.flex-wrap { - flex-wrap: wrap !important; -} - -.flex-nowrap { - flex-wrap: nowrap !important; -} - -.flex-wrap-reverse { - flex-wrap: wrap-reverse !important; -} - -.gap-0 { - gap: 0 !important; -} - -.gap-1 { - gap: 0.25rem !important; -} - -.gap-2 { - gap: 0.5rem !important; -} - -.gap-3 { - gap: 1rem !important; -} - -.gap-4 { - gap: 1.5rem !important; -} - -.gap-5 { - gap: 3rem !important; 
-} - -.justify-content-start { - justify-content: flex-start !important; -} - -.justify-content-end { - justify-content: flex-end !important; -} - -.justify-content-center { - justify-content: center !important; -} - -.justify-content-between { - justify-content: space-between !important; -} - -.justify-content-around { - justify-content: space-around !important; -} - -.justify-content-evenly { - justify-content: space-evenly !important; -} - -.align-items-start { - align-items: flex-start !important; -} - -.align-items-end { - align-items: flex-end !important; -} - -.align-items-center { - align-items: center !important; -} - -.align-items-baseline { - align-items: baseline !important; -} - -.align-items-stretch { - align-items: stretch !important; -} - -.align-content-start { - align-content: flex-start !important; -} - -.align-content-end { - align-content: flex-end !important; -} - -.align-content-center { - align-content: center !important; -} - -.align-content-between { - align-content: space-between !important; -} - -.align-content-around { - align-content: space-around !important; -} - -.align-content-stretch { - align-content: stretch !important; -} - -.align-self-auto { - align-self: auto !important; -} - -.align-self-start { - align-self: flex-start !important; -} - -.align-self-end { - align-self: flex-end !important; -} - -.align-self-center { - align-self: center !important; -} - -.align-self-baseline { - align-self: baseline !important; -} - -.align-self-stretch { - align-self: stretch !important; -} - -.order-first { - order: -1 !important; -} - -.order-0 { - order: 0 !important; -} - -.order-1 { - order: 1 !important; -} - -.order-2 { - order: 2 !important; -} - -.order-3 { - order: 3 !important; -} - -.order-4 { - order: 4 !important; -} - -.order-5 { - order: 5 !important; -} - -.order-last { - order: 6 !important; -} - -.m-0 { - margin: 0 !important; -} - -.m-1 { - margin: 0.25rem !important; -} - -.m-2 { - margin: 0.5rem !important; -} - -.m-3 { - margin: 1rem !important; -} - -.m-4 { - margin: 1.5rem !important; -} - -.m-5 { - margin: 3rem !important; -} - -.m-auto { - margin: auto !important; -} - -.mx-0 { - margin-right: 0 !important; - margin-left: 0 !important; -} - -.mx-1 { - margin-right: 0.25rem !important; - margin-left: 0.25rem !important; -} - -.mx-2 { - margin-right: 0.5rem !important; - margin-left: 0.5rem !important; -} - -.mx-3 { - margin-right: 1rem !important; - margin-left: 1rem !important; -} - -.mx-4 { - margin-right: 1.5rem !important; - margin-left: 1.5rem !important; -} - -.mx-5 { - margin-right: 3rem !important; - margin-left: 3rem !important; -} - -.mx-auto { - margin-right: auto !important; - margin-left: auto !important; -} - -.my-0 { - margin-top: 0 !important; - margin-bottom: 0 !important; -} - -.my-1 { - margin-top: 0.25rem !important; - margin-bottom: 0.25rem !important; -} - -.my-2 { - margin-top: 0.5rem !important; - margin-bottom: 0.5rem !important; -} - -.my-3 { - margin-top: 1rem !important; - margin-bottom: 1rem !important; -} - -.my-4 { - margin-top: 1.5rem !important; - margin-bottom: 1.5rem !important; -} - -.my-5 { - margin-top: 3rem !important; - margin-bottom: 3rem !important; -} - -.my-auto { - margin-top: auto !important; - margin-bottom: auto !important; -} - -.mt-0 { - margin-top: 0 !important; -} - -.mt-1 { - margin-top: 0.25rem !important; -} - -.mt-2 { - margin-top: 0.5rem !important; -} - -.mt-3 { - margin-top: 1rem !important; -} - -.mt-4 { - margin-top: 1.5rem !important; -} - -.mt-5 { - margin-top: 3rem !important; 
-} - -.mt-auto { - margin-top: auto !important; -} - -.me-0 { - margin-right: 0 !important; -} - -.me-1 { - margin-right: 0.25rem !important; -} - -.me-2 { - margin-right: 0.5rem !important; -} - -.me-3 { - margin-right: 1rem !important; -} - -.me-4 { - margin-right: 1.5rem !important; -} - -.me-5 { - margin-right: 3rem !important; -} - -.me-auto { - margin-right: auto !important; -} - -.mb-0 { - margin-bottom: 0 !important; -} - -.mb-1 { - margin-bottom: 0.25rem !important; -} - -.mb-2 { - margin-bottom: 0.5rem !important; -} - -.mb-3 { - margin-bottom: 1rem !important; -} - -.mb-4 { - margin-bottom: 1.5rem !important; -} - -.mb-5 { - margin-bottom: 3rem !important; -} - -.mb-auto { - margin-bottom: auto !important; -} - -.ms-0 { - margin-left: 0 !important; -} - -.ms-1 { - margin-left: 0.25rem !important; -} - -.ms-2 { - margin-left: 0.5rem !important; -} - -.ms-3 { - margin-left: 1rem !important; -} - -.ms-4 { - margin-left: 1.5rem !important; -} - -.ms-5 { - margin-left: 3rem !important; -} - -.ms-auto { - margin-left: auto !important; -} - -.p-0 { - padding: 0 !important; -} - -.p-1 { - padding: 0.25rem !important; -} - -.p-2 { - padding: 0.5rem !important; -} - -.p-3 { - padding: 1rem !important; -} - -.p-4 { - padding: 1.5rem !important; -} - -.p-5 { - padding: 3rem !important; -} - -.px-0 { - padding-right: 0 !important; - padding-left: 0 !important; -} - -.px-1 { - padding-right: 0.25rem !important; - padding-left: 0.25rem !important; -} - -.px-2 { - padding-right: 0.5rem !important; - padding-left: 0.5rem !important; -} - -.px-3 { - padding-right: 1rem !important; - padding-left: 1rem !important; -} - -.px-4 { - padding-right: 1.5rem !important; - padding-left: 1.5rem !important; -} - -.px-5 { - padding-right: 3rem !important; - padding-left: 3rem !important; -} - -.py-0 { - padding-top: 0 !important; - padding-bottom: 0 !important; -} - -.py-1 { - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; -} - -.py-2 { - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; -} - -.py-3 { - padding-top: 1rem !important; - padding-bottom: 1rem !important; -} - -.py-4 { - padding-top: 1.5rem !important; - padding-bottom: 1.5rem !important; -} - -.py-5 { - padding-top: 3rem !important; - padding-bottom: 3rem !important; -} - -.pt-0 { - padding-top: 0 !important; -} - -.pt-1 { - padding-top: 0.25rem !important; -} - -.pt-2 { - padding-top: 0.5rem !important; -} - -.pt-3 { - padding-top: 1rem !important; -} - -.pt-4 { - padding-top: 1.5rem !important; -} - -.pt-5 { - padding-top: 3rem !important; -} - -.pe-0 { - padding-right: 0 !important; -} - -.pe-1 { - padding-right: 0.25rem !important; -} - -.pe-2 { - padding-right: 0.5rem !important; -} - -.pe-3 { - padding-right: 1rem !important; -} - -.pe-4 { - padding-right: 1.5rem !important; -} - -.pe-5 { - padding-right: 3rem !important; -} - -.pb-0 { - padding-bottom: 0 !important; -} - -.pb-1 { - padding-bottom: 0.25rem !important; -} - -.pb-2 { - padding-bottom: 0.5rem !important; -} - -.pb-3 { - padding-bottom: 1rem !important; -} - -.pb-4 { - padding-bottom: 1.5rem !important; -} - -.pb-5 { - padding-bottom: 3rem !important; -} - -.ps-0 { - padding-left: 0 !important; -} - -.ps-1 { - padding-left: 0.25rem !important; -} - -.ps-2 { - padding-left: 0.5rem !important; -} - -.ps-3 { - padding-left: 1rem !important; -} - -.ps-4 { - padding-left: 1.5rem !important; -} - -.ps-5 { - padding-left: 3rem !important; -} - -.font-monospace { - font-family: var(--bs-font-monospace) !important; -} - -.fs-1 { - font-size: 
calc(1.375rem + 1.5vw) !important; -} - -.fs-2 { - font-size: calc(1.325rem + 0.9vw) !important; -} - -.fs-3 { - font-size: calc(1.3rem + 0.6vw) !important; -} - -.fs-4 { - font-size: calc(1.275rem + 0.3vw) !important; -} - -.fs-5 { - font-size: 1.25rem !important; -} - -.fs-6 { - font-size: 1rem !important; -} - -.fst-italic { - font-style: italic !important; -} - -.fst-normal { - font-style: normal !important; -} - -.fw-light { - font-weight: 300 !important; -} - -.fw-lighter { - font-weight: lighter !important; -} - -.fw-normal { - font-weight: 400 !important; -} - -.fw-bold { - font-weight: 700 !important; -} - -.fw-bolder { - font-weight: bolder !important; -} - -.lh-1 { - line-height: 1 !important; -} - -.lh-sm { - line-height: 1.25 !important; -} - -.lh-base { - line-height: 1.5 !important; -} - -.lh-lg { - line-height: 2 !important; -} - -.text-start { - text-align: left !important; -} - -.text-end { - text-align: right !important; -} - -.text-center { - text-align: center !important; -} - -.text-decoration-none { - text-decoration: none !important; -} - -.text-decoration-underline { - text-decoration: underline !important; -} - -.text-decoration-line-through { - text-decoration: line-through !important; -} - -.text-lowercase { - text-transform: lowercase !important; -} - -.text-uppercase { - text-transform: uppercase !important; -} - -.text-capitalize { - text-transform: capitalize !important; -} - -.text-wrap { - white-space: normal !important; -} - -.text-nowrap { - white-space: nowrap !important; -} - -/* rtl:begin:remove */ -.text-break { - word-wrap: break-word !important; - word-break: break-word !important; -} - -/* rtl:end:remove */ -.text-primary { - --bs-text-opacity: 1; - color: rgba(var(--bs-primary-rgb), var(--bs-text-opacity)) !important; -} - -.text-secondary { - --bs-text-opacity: 1; - color: rgba(var(--bs-secondary-rgb), var(--bs-text-opacity)) !important; -} - -.text-success { - --bs-text-opacity: 1; - color: rgba(var(--bs-success-rgb), var(--bs-text-opacity)) !important; -} - -.text-info { - --bs-text-opacity: 1; - color: rgba(var(--bs-info-rgb), var(--bs-text-opacity)) !important; -} - -.text-warning { - --bs-text-opacity: 1; - color: rgba(var(--bs-warning-rgb), var(--bs-text-opacity)) !important; -} - -.text-danger { - --bs-text-opacity: 1; - color: rgba(var(--bs-danger-rgb), var(--bs-text-opacity)) !important; -} - -.text-light { - --bs-text-opacity: 1; - color: rgba(var(--bs-light-rgb), var(--bs-text-opacity)) !important; -} - -.text-dark { - --bs-text-opacity: 1; - color: rgba(var(--bs-dark-rgb), var(--bs-text-opacity)) !important; -} - -.text-black { - --bs-text-opacity: 1; - color: rgba(var(--bs-black-rgb), var(--bs-text-opacity)) !important; -} - -.text-white { - --bs-text-opacity: 1; - color: rgba(var(--bs-white-rgb), var(--bs-text-opacity)) !important; -} - -.text-body { - --bs-text-opacity: 1; - color: rgba(var(--bs-body-color-rgb), var(--bs-text-opacity)) !important; -} - -.text-muted { - --bs-text-opacity: 1; - color: #6c757d !important; -} - -.text-black-50 { - --bs-text-opacity: 1; - color: rgba(0, 0, 0, 0.5) !important; -} - -.text-white-50 { - --bs-text-opacity: 1; - color: rgba(255, 255, 255, 0.5) !important; -} - -.text-reset { - --bs-text-opacity: 1; - color: inherit !important; -} - -.text-opacity-25 { - --bs-text-opacity: 0.25; -} - -.text-opacity-50 { - --bs-text-opacity: 0.5; -} - -.text-opacity-75 { - --bs-text-opacity: 0.75; -} - -.text-opacity-100 { - --bs-text-opacity: 1; -} - -.bg-primary { - --bs-bg-opacity: 1; - background-color: 
rgba(var(--bs-primary-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-secondary { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-secondary-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-success { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-success-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-info { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-info-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-warning { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-warning-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-danger { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-danger-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-light { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-light-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-dark { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-dark-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-black { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-black-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-white { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-white-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-body { - --bs-bg-opacity: 1; - background-color: rgba(var(--bs-body-bg-rgb), var(--bs-bg-opacity)) !important; -} - -.bg-transparent { - --bs-bg-opacity: 1; - background-color: transparent !important; -} - -.bg-opacity-10 { - --bs-bg-opacity: 0.1; -} - -.bg-opacity-25 { - --bs-bg-opacity: 0.25; -} - -.bg-opacity-50 { - --bs-bg-opacity: 0.5; -} - -.bg-opacity-75 { - --bs-bg-opacity: 0.75; -} - -.bg-opacity-100 { - --bs-bg-opacity: 1; -} - -.bg-gradient { - background-image: var(--bs-gradient) !important; -} - -.user-select-all { - -webkit-user-select: all !important; - -moz-user-select: all !important; - user-select: all !important; -} - -.user-select-auto { - -webkit-user-select: auto !important; - -moz-user-select: auto !important; - user-select: auto !important; -} - -.user-select-none { - -webkit-user-select: none !important; - -moz-user-select: none !important; - user-select: none !important; -} - -.pe-none { - pointer-events: none !important; -} - -.pe-auto { - pointer-events: auto !important; -} - -.rounded { - border-radius: 0.25rem !important; -} - -.rounded-0 { - border-radius: 0 !important; -} - -.rounded-1 { - border-radius: 0.2rem !important; -} - -.rounded-2 { - border-radius: 0.25rem !important; -} - -.rounded-3 { - border-radius: 0.3rem !important; -} - -.rounded-circle { - border-radius: 50% !important; -} - -.rounded-pill { - border-radius: 50rem !important; -} - -.rounded-top { - border-top-left-radius: 0.25rem !important; - border-top-right-radius: 0.25rem !important; -} - -.rounded-end { - border-top-right-radius: 0.25rem !important; - border-bottom-right-radius: 0.25rem !important; -} - -.rounded-bottom { - border-bottom-right-radius: 0.25rem !important; - border-bottom-left-radius: 0.25rem !important; -} - -.rounded-start { - border-bottom-left-radius: 0.25rem !important; - border-top-left-radius: 0.25rem !important; -} - -.visible { - visibility: visible !important; -} - -.invisible { - visibility: hidden !important; -} - -@media (min-width: 576px) { - .float-sm-start { - float: left !important; - } - - .float-sm-end { - float: right !important; - } - - .float-sm-none { - float: none !important; - } - - .d-sm-inline { - display: inline !important; - } - - .d-sm-inline-block { - display: inline-block !important; - } - - .d-sm-block { - display: block !important; - } - - .d-sm-grid { - display: grid !important; - } - - 
.d-sm-table { - display: table !important; - } - - .d-sm-table-row { - display: table-row !important; - } - - .d-sm-table-cell { - display: table-cell !important; - } - - .d-sm-flex { - display: flex !important; - } - - .d-sm-inline-flex { - display: inline-flex !important; - } - - .d-sm-none { - display: none !important; - } - - .flex-sm-fill { - flex: 1 1 auto !important; - } - - .flex-sm-row { - flex-direction: row !important; - } - - .flex-sm-column { - flex-direction: column !important; - } - - .flex-sm-row-reverse { - flex-direction: row-reverse !important; - } - - .flex-sm-column-reverse { - flex-direction: column-reverse !important; - } - - .flex-sm-grow-0 { - flex-grow: 0 !important; - } - - .flex-sm-grow-1 { - flex-grow: 1 !important; - } - - .flex-sm-shrink-0 { - flex-shrink: 0 !important; - } - - .flex-sm-shrink-1 { - flex-shrink: 1 !important; - } - - .flex-sm-wrap { - flex-wrap: wrap !important; - } - - .flex-sm-nowrap { - flex-wrap: nowrap !important; - } - - .flex-sm-wrap-reverse { - flex-wrap: wrap-reverse !important; - } - - .gap-sm-0 { - gap: 0 !important; - } - - .gap-sm-1 { - gap: 0.25rem !important; - } - - .gap-sm-2 { - gap: 0.5rem !important; - } - - .gap-sm-3 { - gap: 1rem !important; - } - - .gap-sm-4 { - gap: 1.5rem !important; - } - - .gap-sm-5 { - gap: 3rem !important; - } - - .justify-content-sm-start { - justify-content: flex-start !important; - } - - .justify-content-sm-end { - justify-content: flex-end !important; - } - - .justify-content-sm-center { - justify-content: center !important; - } - - .justify-content-sm-between { - justify-content: space-between !important; - } - - .justify-content-sm-around { - justify-content: space-around !important; - } - - .justify-content-sm-evenly { - justify-content: space-evenly !important; - } - - .align-items-sm-start { - align-items: flex-start !important; - } - - .align-items-sm-end { - align-items: flex-end !important; - } - - .align-items-sm-center { - align-items: center !important; - } - - .align-items-sm-baseline { - align-items: baseline !important; - } - - .align-items-sm-stretch { - align-items: stretch !important; - } - - .align-content-sm-start { - align-content: flex-start !important; - } - - .align-content-sm-end { - align-content: flex-end !important; - } - - .align-content-sm-center { - align-content: center !important; - } - - .align-content-sm-between { - align-content: space-between !important; - } - - .align-content-sm-around { - align-content: space-around !important; - } - - .align-content-sm-stretch { - align-content: stretch !important; - } - - .align-self-sm-auto { - align-self: auto !important; - } - - .align-self-sm-start { - align-self: flex-start !important; - } - - .align-self-sm-end { - align-self: flex-end !important; - } - - .align-self-sm-center { - align-self: center !important; - } - - .align-self-sm-baseline { - align-self: baseline !important; - } - - .align-self-sm-stretch { - align-self: stretch !important; - } - - .order-sm-first { - order: -1 !important; - } - - .order-sm-0 { - order: 0 !important; - } - - .order-sm-1 { - order: 1 !important; - } - - .order-sm-2 { - order: 2 !important; - } - - .order-sm-3 { - order: 3 !important; - } - - .order-sm-4 { - order: 4 !important; - } - - .order-sm-5 { - order: 5 !important; - } - - .order-sm-last { - order: 6 !important; - } - - .m-sm-0 { - margin: 0 !important; - } - - .m-sm-1 { - margin: 0.25rem !important; - } - - .m-sm-2 { - margin: 0.5rem !important; - } - - .m-sm-3 { - margin: 1rem !important; - } - - .m-sm-4 { - margin: 
1.5rem !important; - } - - .m-sm-5 { - margin: 3rem !important; - } - - .m-sm-auto { - margin: auto !important; - } - - .mx-sm-0 { - margin-right: 0 !important; - margin-left: 0 !important; - } - - .mx-sm-1 { - margin-right: 0.25rem !important; - margin-left: 0.25rem !important; - } - - .mx-sm-2 { - margin-right: 0.5rem !important; - margin-left: 0.5rem !important; - } - - .mx-sm-3 { - margin-right: 1rem !important; - margin-left: 1rem !important; - } - - .mx-sm-4 { - margin-right: 1.5rem !important; - margin-left: 1.5rem !important; - } - - .mx-sm-5 { - margin-right: 3rem !important; - margin-left: 3rem !important; - } - - .mx-sm-auto { - margin-right: auto !important; - margin-left: auto !important; - } - - .my-sm-0 { - margin-top: 0 !important; - margin-bottom: 0 !important; - } - - .my-sm-1 { - margin-top: 0.25rem !important; - margin-bottom: 0.25rem !important; - } - - .my-sm-2 { - margin-top: 0.5rem !important; - margin-bottom: 0.5rem !important; - } - - .my-sm-3 { - margin-top: 1rem !important; - margin-bottom: 1rem !important; - } - - .my-sm-4 { - margin-top: 1.5rem !important; - margin-bottom: 1.5rem !important; - } - - .my-sm-5 { - margin-top: 3rem !important; - margin-bottom: 3rem !important; - } - - .my-sm-auto { - margin-top: auto !important; - margin-bottom: auto !important; - } - - .mt-sm-0 { - margin-top: 0 !important; - } - - .mt-sm-1 { - margin-top: 0.25rem !important; - } - - .mt-sm-2 { - margin-top: 0.5rem !important; - } - - .mt-sm-3 { - margin-top: 1rem !important; - } - - .mt-sm-4 { - margin-top: 1.5rem !important; - } - - .mt-sm-5 { - margin-top: 3rem !important; - } - - .mt-sm-auto { - margin-top: auto !important; - } - - .me-sm-0 { - margin-right: 0 !important; - } - - .me-sm-1 { - margin-right: 0.25rem !important; - } - - .me-sm-2 { - margin-right: 0.5rem !important; - } - - .me-sm-3 { - margin-right: 1rem !important; - } - - .me-sm-4 { - margin-right: 1.5rem !important; - } - - .me-sm-5 { - margin-right: 3rem !important; - } - - .me-sm-auto { - margin-right: auto !important; - } - - .mb-sm-0 { - margin-bottom: 0 !important; - } - - .mb-sm-1 { - margin-bottom: 0.25rem !important; - } - - .mb-sm-2 { - margin-bottom: 0.5rem !important; - } - - .mb-sm-3 { - margin-bottom: 1rem !important; - } - - .mb-sm-4 { - margin-bottom: 1.5rem !important; - } - - .mb-sm-5 { - margin-bottom: 3rem !important; - } - - .mb-sm-auto { - margin-bottom: auto !important; - } - - .ms-sm-0 { - margin-left: 0 !important; - } - - .ms-sm-1 { - margin-left: 0.25rem !important; - } - - .ms-sm-2 { - margin-left: 0.5rem !important; - } - - .ms-sm-3 { - margin-left: 1rem !important; - } - - .ms-sm-4 { - margin-left: 1.5rem !important; - } - - .ms-sm-5 { - margin-left: 3rem !important; - } - - .ms-sm-auto { - margin-left: auto !important; - } - - .p-sm-0 { - padding: 0 !important; - } - - .p-sm-1 { - padding: 0.25rem !important; - } - - .p-sm-2 { - padding: 0.5rem !important; - } - - .p-sm-3 { - padding: 1rem !important; - } - - .p-sm-4 { - padding: 1.5rem !important; - } - - .p-sm-5 { - padding: 3rem !important; - } - - .px-sm-0 { - padding-right: 0 !important; - padding-left: 0 !important; - } - - .px-sm-1 { - padding-right: 0.25rem !important; - padding-left: 0.25rem !important; - } - - .px-sm-2 { - padding-right: 0.5rem !important; - padding-left: 0.5rem !important; - } - - .px-sm-3 { - padding-right: 1rem !important; - padding-left: 1rem !important; - } - - .px-sm-4 { - padding-right: 1.5rem !important; - padding-left: 1.5rem !important; - } - - .px-sm-5 { - padding-right: 3rem !important; - 
padding-left: 3rem !important; - } - - .py-sm-0 { - padding-top: 0 !important; - padding-bottom: 0 !important; - } - - .py-sm-1 { - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - } - - .py-sm-2 { - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - } - - .py-sm-3 { - padding-top: 1rem !important; - padding-bottom: 1rem !important; - } - - .py-sm-4 { - padding-top: 1.5rem !important; - padding-bottom: 1.5rem !important; - } - - .py-sm-5 { - padding-top: 3rem !important; - padding-bottom: 3rem !important; - } - - .pt-sm-0 { - padding-top: 0 !important; - } - - .pt-sm-1 { - padding-top: 0.25rem !important; - } - - .pt-sm-2 { - padding-top: 0.5rem !important; - } - - .pt-sm-3 { - padding-top: 1rem !important; - } - - .pt-sm-4 { - padding-top: 1.5rem !important; - } - - .pt-sm-5 { - padding-top: 3rem !important; - } - - .pe-sm-0 { - padding-right: 0 !important; - } - - .pe-sm-1 { - padding-right: 0.25rem !important; - } - - .pe-sm-2 { - padding-right: 0.5rem !important; - } - - .pe-sm-3 { - padding-right: 1rem !important; - } - - .pe-sm-4 { - padding-right: 1.5rem !important; - } - - .pe-sm-5 { - padding-right: 3rem !important; - } - - .pb-sm-0 { - padding-bottom: 0 !important; - } - - .pb-sm-1 { - padding-bottom: 0.25rem !important; - } - - .pb-sm-2 { - padding-bottom: 0.5rem !important; - } - - .pb-sm-3 { - padding-bottom: 1rem !important; - } - - .pb-sm-4 { - padding-bottom: 1.5rem !important; - } - - .pb-sm-5 { - padding-bottom: 3rem !important; - } - - .ps-sm-0 { - padding-left: 0 !important; - } - - .ps-sm-1 { - padding-left: 0.25rem !important; - } - - .ps-sm-2 { - padding-left: 0.5rem !important; - } - - .ps-sm-3 { - padding-left: 1rem !important; - } - - .ps-sm-4 { - padding-left: 1.5rem !important; - } - - .ps-sm-5 { - padding-left: 3rem !important; - } - - .text-sm-start { - text-align: left !important; - } - - .text-sm-end { - text-align: right !important; - } - - .text-sm-center { - text-align: center !important; - } -} -@media (min-width: 768px) { - .float-md-start { - float: left !important; - } - - .float-md-end { - float: right !important; - } - - .float-md-none { - float: none !important; - } - - .d-md-inline { - display: inline !important; - } - - .d-md-inline-block { - display: inline-block !important; - } - - .d-md-block { - display: block !important; - } - - .d-md-grid { - display: grid !important; - } - - .d-md-table { - display: table !important; - } - - .d-md-table-row { - display: table-row !important; - } - - .d-md-table-cell { - display: table-cell !important; - } - - .d-md-flex { - display: flex !important; - } - - .d-md-inline-flex { - display: inline-flex !important; - } - - .d-md-none { - display: none !important; - } - - .flex-md-fill { - flex: 1 1 auto !important; - } - - .flex-md-row { - flex-direction: row !important; - } - - .flex-md-column { - flex-direction: column !important; - } - - .flex-md-row-reverse { - flex-direction: row-reverse !important; - } - - .flex-md-column-reverse { - flex-direction: column-reverse !important; - } - - .flex-md-grow-0 { - flex-grow: 0 !important; - } - - .flex-md-grow-1 { - flex-grow: 1 !important; - } - - .flex-md-shrink-0 { - flex-shrink: 0 !important; - } - - .flex-md-shrink-1 { - flex-shrink: 1 !important; - } - - .flex-md-wrap { - flex-wrap: wrap !important; - } - - .flex-md-nowrap { - flex-wrap: nowrap !important; - } - - .flex-md-wrap-reverse { - flex-wrap: wrap-reverse !important; - } - - .gap-md-0 { - gap: 0 !important; - } - - .gap-md-1 { - gap: 0.25rem !important; - } 
- - .gap-md-2 { - gap: 0.5rem !important; - } - - .gap-md-3 { - gap: 1rem !important; - } - - .gap-md-4 { - gap: 1.5rem !important; - } - - .gap-md-5 { - gap: 3rem !important; - } - - .justify-content-md-start { - justify-content: flex-start !important; - } - - .justify-content-md-end { - justify-content: flex-end !important; - } - - .justify-content-md-center { - justify-content: center !important; - } - - .justify-content-md-between { - justify-content: space-between !important; - } - - .justify-content-md-around { - justify-content: space-around !important; - } - - .justify-content-md-evenly { - justify-content: space-evenly !important; - } - - .align-items-md-start { - align-items: flex-start !important; - } - - .align-items-md-end { - align-items: flex-end !important; - } - - .align-items-md-center { - align-items: center !important; - } - - .align-items-md-baseline { - align-items: baseline !important; - } - - .align-items-md-stretch { - align-items: stretch !important; - } - - .align-content-md-start { - align-content: flex-start !important; - } - - .align-content-md-end { - align-content: flex-end !important; - } - - .align-content-md-center { - align-content: center !important; - } - - .align-content-md-between { - align-content: space-between !important; - } - - .align-content-md-around { - align-content: space-around !important; - } - - .align-content-md-stretch { - align-content: stretch !important; - } - - .align-self-md-auto { - align-self: auto !important; - } - - .align-self-md-start { - align-self: flex-start !important; - } - - .align-self-md-end { - align-self: flex-end !important; - } - - .align-self-md-center { - align-self: center !important; - } - - .align-self-md-baseline { - align-self: baseline !important; - } - - .align-self-md-stretch { - align-self: stretch !important; - } - - .order-md-first { - order: -1 !important; - } - - .order-md-0 { - order: 0 !important; - } - - .order-md-1 { - order: 1 !important; - } - - .order-md-2 { - order: 2 !important; - } - - .order-md-3 { - order: 3 !important; - } - - .order-md-4 { - order: 4 !important; - } - - .order-md-5 { - order: 5 !important; - } - - .order-md-last { - order: 6 !important; - } - - .m-md-0 { - margin: 0 !important; - } - - .m-md-1 { - margin: 0.25rem !important; - } - - .m-md-2 { - margin: 0.5rem !important; - } - - .m-md-3 { - margin: 1rem !important; - } - - .m-md-4 { - margin: 1.5rem !important; - } - - .m-md-5 { - margin: 3rem !important; - } - - .m-md-auto { - margin: auto !important; - } - - .mx-md-0 { - margin-right: 0 !important; - margin-left: 0 !important; - } - - .mx-md-1 { - margin-right: 0.25rem !important; - margin-left: 0.25rem !important; - } - - .mx-md-2 { - margin-right: 0.5rem !important; - margin-left: 0.5rem !important; - } - - .mx-md-3 { - margin-right: 1rem !important; - margin-left: 1rem !important; - } - - .mx-md-4 { - margin-right: 1.5rem !important; - margin-left: 1.5rem !important; - } - - .mx-md-5 { - margin-right: 3rem !important; - margin-left: 3rem !important; - } - - .mx-md-auto { - margin-right: auto !important; - margin-left: auto !important; - } - - .my-md-0 { - margin-top: 0 !important; - margin-bottom: 0 !important; - } - - .my-md-1 { - margin-top: 0.25rem !important; - margin-bottom: 0.25rem !important; - } - - .my-md-2 { - margin-top: 0.5rem !important; - margin-bottom: 0.5rem !important; - } - - .my-md-3 { - margin-top: 1rem !important; - margin-bottom: 1rem !important; - } - - .my-md-4 { - margin-top: 1.5rem !important; - margin-bottom: 1.5rem !important; - } - - 
.my-md-5 { - margin-top: 3rem !important; - margin-bottom: 3rem !important; - } - - .my-md-auto { - margin-top: auto !important; - margin-bottom: auto !important; - } - - .mt-md-0 { - margin-top: 0 !important; - } - - .mt-md-1 { - margin-top: 0.25rem !important; - } - - .mt-md-2 { - margin-top: 0.5rem !important; - } - - .mt-md-3 { - margin-top: 1rem !important; - } - - .mt-md-4 { - margin-top: 1.5rem !important; - } - - .mt-md-5 { - margin-top: 3rem !important; - } - - .mt-md-auto { - margin-top: auto !important; - } - - .me-md-0 { - margin-right: 0 !important; - } - - .me-md-1 { - margin-right: 0.25rem !important; - } - - .me-md-2 { - margin-right: 0.5rem !important; - } - - .me-md-3 { - margin-right: 1rem !important; - } - - .me-md-4 { - margin-right: 1.5rem !important; - } - - .me-md-5 { - margin-right: 3rem !important; - } - - .me-md-auto { - margin-right: auto !important; - } - - .mb-md-0 { - margin-bottom: 0 !important; - } - - .mb-md-1 { - margin-bottom: 0.25rem !important; - } - - .mb-md-2 { - margin-bottom: 0.5rem !important; - } - - .mb-md-3 { - margin-bottom: 1rem !important; - } - - .mb-md-4 { - margin-bottom: 1.5rem !important; - } - - .mb-md-5 { - margin-bottom: 3rem !important; - } - - .mb-md-auto { - margin-bottom: auto !important; - } - - .ms-md-0 { - margin-left: 0 !important; - } - - .ms-md-1 { - margin-left: 0.25rem !important; - } - - .ms-md-2 { - margin-left: 0.5rem !important; - } - - .ms-md-3 { - margin-left: 1rem !important; - } - - .ms-md-4 { - margin-left: 1.5rem !important; - } - - .ms-md-5 { - margin-left: 3rem !important; - } - - .ms-md-auto { - margin-left: auto !important; - } - - .p-md-0 { - padding: 0 !important; - } - - .p-md-1 { - padding: 0.25rem !important; - } - - .p-md-2 { - padding: 0.5rem !important; - } - - .p-md-3 { - padding: 1rem !important; - } - - .p-md-4 { - padding: 1.5rem !important; - } - - .p-md-5 { - padding: 3rem !important; - } - - .px-md-0 { - padding-right: 0 !important; - padding-left: 0 !important; - } - - .px-md-1 { - padding-right: 0.25rem !important; - padding-left: 0.25rem !important; - } - - .px-md-2 { - padding-right: 0.5rem !important; - padding-left: 0.5rem !important; - } - - .px-md-3 { - padding-right: 1rem !important; - padding-left: 1rem !important; - } - - .px-md-4 { - padding-right: 1.5rem !important; - padding-left: 1.5rem !important; - } - - .px-md-5 { - padding-right: 3rem !important; - padding-left: 3rem !important; - } - - .py-md-0 { - padding-top: 0 !important; - padding-bottom: 0 !important; - } - - .py-md-1 { - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - } - - .py-md-2 { - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - } - - .py-md-3 { - padding-top: 1rem !important; - padding-bottom: 1rem !important; - } - - .py-md-4 { - padding-top: 1.5rem !important; - padding-bottom: 1.5rem !important; - } - - .py-md-5 { - padding-top: 3rem !important; - padding-bottom: 3rem !important; - } - - .pt-md-0 { - padding-top: 0 !important; - } - - .pt-md-1 { - padding-top: 0.25rem !important; - } - - .pt-md-2 { - padding-top: 0.5rem !important; - } - - .pt-md-3 { - padding-top: 1rem !important; - } - - .pt-md-4 { - padding-top: 1.5rem !important; - } - - .pt-md-5 { - padding-top: 3rem !important; - } - - .pe-md-0 { - padding-right: 0 !important; - } - - .pe-md-1 { - padding-right: 0.25rem !important; - } - - .pe-md-2 { - padding-right: 0.5rem !important; - } - - .pe-md-3 { - padding-right: 1rem !important; - } - - .pe-md-4 { - padding-right: 1.5rem !important; - } - - 
.pe-md-5 { - padding-right: 3rem !important; - } - - .pb-md-0 { - padding-bottom: 0 !important; - } - - .pb-md-1 { - padding-bottom: 0.25rem !important; - } - - .pb-md-2 { - padding-bottom: 0.5rem !important; - } - - .pb-md-3 { - padding-bottom: 1rem !important; - } - - .pb-md-4 { - padding-bottom: 1.5rem !important; - } - - .pb-md-5 { - padding-bottom: 3rem !important; - } - - .ps-md-0 { - padding-left: 0 !important; - } - - .ps-md-1 { - padding-left: 0.25rem !important; - } - - .ps-md-2 { - padding-left: 0.5rem !important; - } - - .ps-md-3 { - padding-left: 1rem !important; - } - - .ps-md-4 { - padding-left: 1.5rem !important; - } - - .ps-md-5 { - padding-left: 3rem !important; - } - - .text-md-start { - text-align: left !important; - } - - .text-md-end { - text-align: right !important; - } - - .text-md-center { - text-align: center !important; - } -} -@media (min-width: 992px) { - .float-lg-start { - float: left !important; - } - - .float-lg-end { - float: right !important; - } - - .float-lg-none { - float: none !important; - } - - .d-lg-inline { - display: inline !important; - } - - .d-lg-inline-block { - display: inline-block !important; - } - - .d-lg-block { - display: block !important; - } - - .d-lg-grid { - display: grid !important; - } - - .d-lg-table { - display: table !important; - } - - .d-lg-table-row { - display: table-row !important; - } - - .d-lg-table-cell { - display: table-cell !important; - } - - .d-lg-flex { - display: flex !important; - } - - .d-lg-inline-flex { - display: inline-flex !important; - } - - .d-lg-none { - display: none !important; - } - - .flex-lg-fill { - flex: 1 1 auto !important; - } - - .flex-lg-row { - flex-direction: row !important; - } - - .flex-lg-column { - flex-direction: column !important; - } - - .flex-lg-row-reverse { - flex-direction: row-reverse !important; - } - - .flex-lg-column-reverse { - flex-direction: column-reverse !important; - } - - .flex-lg-grow-0 { - flex-grow: 0 !important; - } - - .flex-lg-grow-1 { - flex-grow: 1 !important; - } - - .flex-lg-shrink-0 { - flex-shrink: 0 !important; - } - - .flex-lg-shrink-1 { - flex-shrink: 1 !important; - } - - .flex-lg-wrap { - flex-wrap: wrap !important; - } - - .flex-lg-nowrap { - flex-wrap: nowrap !important; - } - - .flex-lg-wrap-reverse { - flex-wrap: wrap-reverse !important; - } - - .gap-lg-0 { - gap: 0 !important; - } - - .gap-lg-1 { - gap: 0.25rem !important; - } - - .gap-lg-2 { - gap: 0.5rem !important; - } - - .gap-lg-3 { - gap: 1rem !important; - } - - .gap-lg-4 { - gap: 1.5rem !important; - } - - .gap-lg-5 { - gap: 3rem !important; - } - - .justify-content-lg-start { - justify-content: flex-start !important; - } - - .justify-content-lg-end { - justify-content: flex-end !important; - } - - .justify-content-lg-center { - justify-content: center !important; - } - - .justify-content-lg-between { - justify-content: space-between !important; - } - - .justify-content-lg-around { - justify-content: space-around !important; - } - - .justify-content-lg-evenly { - justify-content: space-evenly !important; - } - - .align-items-lg-start { - align-items: flex-start !important; - } - - .align-items-lg-end { - align-items: flex-end !important; - } - - .align-items-lg-center { - align-items: center !important; - } - - .align-items-lg-baseline { - align-items: baseline !important; - } - - .align-items-lg-stretch { - align-items: stretch !important; - } - - .align-content-lg-start { - align-content: flex-start !important; - } - - .align-content-lg-end { - align-content: flex-end !important; - } - - 
.align-content-lg-center { - align-content: center !important; - } - - .align-content-lg-between { - align-content: space-between !important; - } - - .align-content-lg-around { - align-content: space-around !important; - } - - .align-content-lg-stretch { - align-content: stretch !important; - } - - .align-self-lg-auto { - align-self: auto !important; - } - - .align-self-lg-start { - align-self: flex-start !important; - } - - .align-self-lg-end { - align-self: flex-end !important; - } - - .align-self-lg-center { - align-self: center !important; - } - - .align-self-lg-baseline { - align-self: baseline !important; - } - - .align-self-lg-stretch { - align-self: stretch !important; - } - - .order-lg-first { - order: -1 !important; - } - - .order-lg-0 { - order: 0 !important; - } - - .order-lg-1 { - order: 1 !important; - } - - .order-lg-2 { - order: 2 !important; - } - - .order-lg-3 { - order: 3 !important; - } - - .order-lg-4 { - order: 4 !important; - } - - .order-lg-5 { - order: 5 !important; - } - - .order-lg-last { - order: 6 !important; - } - - .m-lg-0 { - margin: 0 !important; - } - - .m-lg-1 { - margin: 0.25rem !important; - } - - .m-lg-2 { - margin: 0.5rem !important; - } - - .m-lg-3 { - margin: 1rem !important; - } - - .m-lg-4 { - margin: 1.5rem !important; - } - - .m-lg-5 { - margin: 3rem !important; - } - - .m-lg-auto { - margin: auto !important; - } - - .mx-lg-0 { - margin-right: 0 !important; - margin-left: 0 !important; - } - - .mx-lg-1 { - margin-right: 0.25rem !important; - margin-left: 0.25rem !important; - } - - .mx-lg-2 { - margin-right: 0.5rem !important; - margin-left: 0.5rem !important; - } - - .mx-lg-3 { - margin-right: 1rem !important; - margin-left: 1rem !important; - } - - .mx-lg-4 { - margin-right: 1.5rem !important; - margin-left: 1.5rem !important; - } - - .mx-lg-5 { - margin-right: 3rem !important; - margin-left: 3rem !important; - } - - .mx-lg-auto { - margin-right: auto !important; - margin-left: auto !important; - } - - .my-lg-0 { - margin-top: 0 !important; - margin-bottom: 0 !important; - } - - .my-lg-1 { - margin-top: 0.25rem !important; - margin-bottom: 0.25rem !important; - } - - .my-lg-2 { - margin-top: 0.5rem !important; - margin-bottom: 0.5rem !important; - } - - .my-lg-3 { - margin-top: 1rem !important; - margin-bottom: 1rem !important; - } - - .my-lg-4 { - margin-top: 1.5rem !important; - margin-bottom: 1.5rem !important; - } - - .my-lg-5 { - margin-top: 3rem !important; - margin-bottom: 3rem !important; - } - - .my-lg-auto { - margin-top: auto !important; - margin-bottom: auto !important; - } - - .mt-lg-0 { - margin-top: 0 !important; - } - - .mt-lg-1 { - margin-top: 0.25rem !important; - } - - .mt-lg-2 { - margin-top: 0.5rem !important; - } - - .mt-lg-3 { - margin-top: 1rem !important; - } - - .mt-lg-4 { - margin-top: 1.5rem !important; - } - - .mt-lg-5 { - margin-top: 3rem !important; - } - - .mt-lg-auto { - margin-top: auto !important; - } - - .me-lg-0 { - margin-right: 0 !important; - } - - .me-lg-1 { - margin-right: 0.25rem !important; - } - - .me-lg-2 { - margin-right: 0.5rem !important; - } - - .me-lg-3 { - margin-right: 1rem !important; - } - - .me-lg-4 { - margin-right: 1.5rem !important; - } - - .me-lg-5 { - margin-right: 3rem !important; - } - - .me-lg-auto { - margin-right: auto !important; - } - - .mb-lg-0 { - margin-bottom: 0 !important; - } - - .mb-lg-1 { - margin-bottom: 0.25rem !important; - } - - .mb-lg-2 { - margin-bottom: 0.5rem !important; - } - - .mb-lg-3 { - margin-bottom: 1rem !important; - } - - .mb-lg-4 { - margin-bottom: 
1.5rem !important; - } - - .mb-lg-5 { - margin-bottom: 3rem !important; - } - - .mb-lg-auto { - margin-bottom: auto !important; - } - - .ms-lg-0 { - margin-left: 0 !important; - } - - .ms-lg-1 { - margin-left: 0.25rem !important; - } - - .ms-lg-2 { - margin-left: 0.5rem !important; - } - - .ms-lg-3 { - margin-left: 1rem !important; - } - - .ms-lg-4 { - margin-left: 1.5rem !important; - } - - .ms-lg-5 { - margin-left: 3rem !important; - } - - .ms-lg-auto { - margin-left: auto !important; - } - - .p-lg-0 { - padding: 0 !important; - } - - .p-lg-1 { - padding: 0.25rem !important; - } - - .p-lg-2 { - padding: 0.5rem !important; - } - - .p-lg-3 { - padding: 1rem !important; - } - - .p-lg-4 { - padding: 1.5rem !important; - } - - .p-lg-5 { - padding: 3rem !important; - } - - .px-lg-0 { - padding-right: 0 !important; - padding-left: 0 !important; - } - - .px-lg-1 { - padding-right: 0.25rem !important; - padding-left: 0.25rem !important; - } - - .px-lg-2 { - padding-right: 0.5rem !important; - padding-left: 0.5rem !important; - } - - .px-lg-3 { - padding-right: 1rem !important; - padding-left: 1rem !important; - } - - .px-lg-4 { - padding-right: 1.5rem !important; - padding-left: 1.5rem !important; - } - - .px-lg-5 { - padding-right: 3rem !important; - padding-left: 3rem !important; - } - - .py-lg-0 { - padding-top: 0 !important; - padding-bottom: 0 !important; - } - - .py-lg-1 { - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - } - - .py-lg-2 { - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - } - - .py-lg-3 { - padding-top: 1rem !important; - padding-bottom: 1rem !important; - } - - .py-lg-4 { - padding-top: 1.5rem !important; - padding-bottom: 1.5rem !important; - } - - .py-lg-5 { - padding-top: 3rem !important; - padding-bottom: 3rem !important; - } - - .pt-lg-0 { - padding-top: 0 !important; - } - - .pt-lg-1 { - padding-top: 0.25rem !important; - } - - .pt-lg-2 { - padding-top: 0.5rem !important; - } - - .pt-lg-3 { - padding-top: 1rem !important; - } - - .pt-lg-4 { - padding-top: 1.5rem !important; - } - - .pt-lg-5 { - padding-top: 3rem !important; - } - - .pe-lg-0 { - padding-right: 0 !important; - } - - .pe-lg-1 { - padding-right: 0.25rem !important; - } - - .pe-lg-2 { - padding-right: 0.5rem !important; - } - - .pe-lg-3 { - padding-right: 1rem !important; - } - - .pe-lg-4 { - padding-right: 1.5rem !important; - } - - .pe-lg-5 { - padding-right: 3rem !important; - } - - .pb-lg-0 { - padding-bottom: 0 !important; - } - - .pb-lg-1 { - padding-bottom: 0.25rem !important; - } - - .pb-lg-2 { - padding-bottom: 0.5rem !important; - } - - .pb-lg-3 { - padding-bottom: 1rem !important; - } - - .pb-lg-4 { - padding-bottom: 1.5rem !important; - } - - .pb-lg-5 { - padding-bottom: 3rem !important; - } - - .ps-lg-0 { - padding-left: 0 !important; - } - - .ps-lg-1 { - padding-left: 0.25rem !important; - } - - .ps-lg-2 { - padding-left: 0.5rem !important; - } - - .ps-lg-3 { - padding-left: 1rem !important; - } - - .ps-lg-4 { - padding-left: 1.5rem !important; - } - - .ps-lg-5 { - padding-left: 3rem !important; - } - - .text-lg-start { - text-align: left !important; - } - - .text-lg-end { - text-align: right !important; - } - - .text-lg-center { - text-align: center !important; - } -} -@media (min-width: 1200px) { - .float-xl-start { - float: left !important; - } - - .float-xl-end { - float: right !important; - } - - .float-xl-none { - float: none !important; - } - - .d-xl-inline { - display: inline !important; - } - - .d-xl-inline-block { - display: 
inline-block !important; - } - - .d-xl-block { - display: block !important; - } - - .d-xl-grid { - display: grid !important; - } - - .d-xl-table { - display: table !important; - } - - .d-xl-table-row { - display: table-row !important; - } - - .d-xl-table-cell { - display: table-cell !important; - } - - .d-xl-flex { - display: flex !important; - } - - .d-xl-inline-flex { - display: inline-flex !important; - } - - .d-xl-none { - display: none !important; - } - - .flex-xl-fill { - flex: 1 1 auto !important; - } - - .flex-xl-row { - flex-direction: row !important; - } - - .flex-xl-column { - flex-direction: column !important; - } - - .flex-xl-row-reverse { - flex-direction: row-reverse !important; - } - - .flex-xl-column-reverse { - flex-direction: column-reverse !important; - } - - .flex-xl-grow-0 { - flex-grow: 0 !important; - } - - .flex-xl-grow-1 { - flex-grow: 1 !important; - } - - .flex-xl-shrink-0 { - flex-shrink: 0 !important; - } - - .flex-xl-shrink-1 { - flex-shrink: 1 !important; - } - - .flex-xl-wrap { - flex-wrap: wrap !important; - } - - .flex-xl-nowrap { - flex-wrap: nowrap !important; - } - - .flex-xl-wrap-reverse { - flex-wrap: wrap-reverse !important; - } - - .gap-xl-0 { - gap: 0 !important; - } - - .gap-xl-1 { - gap: 0.25rem !important; - } - - .gap-xl-2 { - gap: 0.5rem !important; - } - - .gap-xl-3 { - gap: 1rem !important; - } - - .gap-xl-4 { - gap: 1.5rem !important; - } - - .gap-xl-5 { - gap: 3rem !important; - } - - .justify-content-xl-start { - justify-content: flex-start !important; - } - - .justify-content-xl-end { - justify-content: flex-end !important; - } - - .justify-content-xl-center { - justify-content: center !important; - } - - .justify-content-xl-between { - justify-content: space-between !important; - } - - .justify-content-xl-around { - justify-content: space-around !important; - } - - .justify-content-xl-evenly { - justify-content: space-evenly !important; - } - - .align-items-xl-start { - align-items: flex-start !important; - } - - .align-items-xl-end { - align-items: flex-end !important; - } - - .align-items-xl-center { - align-items: center !important; - } - - .align-items-xl-baseline { - align-items: baseline !important; - } - - .align-items-xl-stretch { - align-items: stretch !important; - } - - .align-content-xl-start { - align-content: flex-start !important; - } - - .align-content-xl-end { - align-content: flex-end !important; - } - - .align-content-xl-center { - align-content: center !important; - } - - .align-content-xl-between { - align-content: space-between !important; - } - - .align-content-xl-around { - align-content: space-around !important; - } - - .align-content-xl-stretch { - align-content: stretch !important; - } - - .align-self-xl-auto { - align-self: auto !important; - } - - .align-self-xl-start { - align-self: flex-start !important; - } - - .align-self-xl-end { - align-self: flex-end !important; - } - - .align-self-xl-center { - align-self: center !important; - } - - .align-self-xl-baseline { - align-self: baseline !important; - } - - .align-self-xl-stretch { - align-self: stretch !important; - } - - .order-xl-first { - order: -1 !important; - } - - .order-xl-0 { - order: 0 !important; - } - - .order-xl-1 { - order: 1 !important; - } - - .order-xl-2 { - order: 2 !important; - } - - .order-xl-3 { - order: 3 !important; - } - - .order-xl-4 { - order: 4 !important; - } - - .order-xl-5 { - order: 5 !important; - } - - .order-xl-last { - order: 6 !important; - } - - .m-xl-0 { - margin: 0 !important; - } - - .m-xl-1 { - margin: 0.25rem 
!important; - } - - .m-xl-2 { - margin: 0.5rem !important; - } - - .m-xl-3 { - margin: 1rem !important; - } - - .m-xl-4 { - margin: 1.5rem !important; - } - - .m-xl-5 { - margin: 3rem !important; - } - - .m-xl-auto { - margin: auto !important; - } - - .mx-xl-0 { - margin-right: 0 !important; - margin-left: 0 !important; - } - - .mx-xl-1 { - margin-right: 0.25rem !important; - margin-left: 0.25rem !important; - } - - .mx-xl-2 { - margin-right: 0.5rem !important; - margin-left: 0.5rem !important; - } - - .mx-xl-3 { - margin-right: 1rem !important; - margin-left: 1rem !important; - } - - .mx-xl-4 { - margin-right: 1.5rem !important; - margin-left: 1.5rem !important; - } - - .mx-xl-5 { - margin-right: 3rem !important; - margin-left: 3rem !important; - } - - .mx-xl-auto { - margin-right: auto !important; - margin-left: auto !important; - } - - .my-xl-0 { - margin-top: 0 !important; - margin-bottom: 0 !important; - } - - .my-xl-1 { - margin-top: 0.25rem !important; - margin-bottom: 0.25rem !important; - } - - .my-xl-2 { - margin-top: 0.5rem !important; - margin-bottom: 0.5rem !important; - } - - .my-xl-3 { - margin-top: 1rem !important; - margin-bottom: 1rem !important; - } - - .my-xl-4 { - margin-top: 1.5rem !important; - margin-bottom: 1.5rem !important; - } - - .my-xl-5 { - margin-top: 3rem !important; - margin-bottom: 3rem !important; - } - - .my-xl-auto { - margin-top: auto !important; - margin-bottom: auto !important; - } - - .mt-xl-0 { - margin-top: 0 !important; - } - - .mt-xl-1 { - margin-top: 0.25rem !important; - } - - .mt-xl-2 { - margin-top: 0.5rem !important; - } - - .mt-xl-3 { - margin-top: 1rem !important; - } - - .mt-xl-4 { - margin-top: 1.5rem !important; - } - - .mt-xl-5 { - margin-top: 3rem !important; - } - - .mt-xl-auto { - margin-top: auto !important; - } - - .me-xl-0 { - margin-right: 0 !important; - } - - .me-xl-1 { - margin-right: 0.25rem !important; - } - - .me-xl-2 { - margin-right: 0.5rem !important; - } - - .me-xl-3 { - margin-right: 1rem !important; - } - - .me-xl-4 { - margin-right: 1.5rem !important; - } - - .me-xl-5 { - margin-right: 3rem !important; - } - - .me-xl-auto { - margin-right: auto !important; - } - - .mb-xl-0 { - margin-bottom: 0 !important; - } - - .mb-xl-1 { - margin-bottom: 0.25rem !important; - } - - .mb-xl-2 { - margin-bottom: 0.5rem !important; - } - - .mb-xl-3 { - margin-bottom: 1rem !important; - } - - .mb-xl-4 { - margin-bottom: 1.5rem !important; - } - - .mb-xl-5 { - margin-bottom: 3rem !important; - } - - .mb-xl-auto { - margin-bottom: auto !important; - } - - .ms-xl-0 { - margin-left: 0 !important; - } - - .ms-xl-1 { - margin-left: 0.25rem !important; - } - - .ms-xl-2 { - margin-left: 0.5rem !important; - } - - .ms-xl-3 { - margin-left: 1rem !important; - } - - .ms-xl-4 { - margin-left: 1.5rem !important; - } - - .ms-xl-5 { - margin-left: 3rem !important; - } - - .ms-xl-auto { - margin-left: auto !important; - } - - .p-xl-0 { - padding: 0 !important; - } - - .p-xl-1 { - padding: 0.25rem !important; - } - - .p-xl-2 { - padding: 0.5rem !important; - } - - .p-xl-3 { - padding: 1rem !important; - } - - .p-xl-4 { - padding: 1.5rem !important; - } - - .p-xl-5 { - padding: 3rem !important; - } - - .px-xl-0 { - padding-right: 0 !important; - padding-left: 0 !important; - } - - .px-xl-1 { - padding-right: 0.25rem !important; - padding-left: 0.25rem !important; - } - - .px-xl-2 { - padding-right: 0.5rem !important; - padding-left: 0.5rem !important; - } - - .px-xl-3 { - padding-right: 1rem !important; - padding-left: 1rem !important; - } - - 
.px-xl-4 { - padding-right: 1.5rem !important; - padding-left: 1.5rem !important; - } - - .px-xl-5 { - padding-right: 3rem !important; - padding-left: 3rem !important; - } - - .py-xl-0 { - padding-top: 0 !important; - padding-bottom: 0 !important; - } - - .py-xl-1 { - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - } - - .py-xl-2 { - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - } - - .py-xl-3 { - padding-top: 1rem !important; - padding-bottom: 1rem !important; - } - - .py-xl-4 { - padding-top: 1.5rem !important; - padding-bottom: 1.5rem !important; - } - - .py-xl-5 { - padding-top: 3rem !important; - padding-bottom: 3rem !important; - } - - .pt-xl-0 { - padding-top: 0 !important; - } - - .pt-xl-1 { - padding-top: 0.25rem !important; - } - - .pt-xl-2 { - padding-top: 0.5rem !important; - } - - .pt-xl-3 { - padding-top: 1rem !important; - } - - .pt-xl-4 { - padding-top: 1.5rem !important; - } - - .pt-xl-5 { - padding-top: 3rem !important; - } - - .pe-xl-0 { - padding-right: 0 !important; - } - - .pe-xl-1 { - padding-right: 0.25rem !important; - } - - .pe-xl-2 { - padding-right: 0.5rem !important; - } - - .pe-xl-3 { - padding-right: 1rem !important; - } - - .pe-xl-4 { - padding-right: 1.5rem !important; - } - - .pe-xl-5 { - padding-right: 3rem !important; - } - - .pb-xl-0 { - padding-bottom: 0 !important; - } - - .pb-xl-1 { - padding-bottom: 0.25rem !important; - } - - .pb-xl-2 { - padding-bottom: 0.5rem !important; - } - - .pb-xl-3 { - padding-bottom: 1rem !important; - } - - .pb-xl-4 { - padding-bottom: 1.5rem !important; - } - - .pb-xl-5 { - padding-bottom: 3rem !important; - } - - .ps-xl-0 { - padding-left: 0 !important; - } - - .ps-xl-1 { - padding-left: 0.25rem !important; - } - - .ps-xl-2 { - padding-left: 0.5rem !important; - } - - .ps-xl-3 { - padding-left: 1rem !important; - } - - .ps-xl-4 { - padding-left: 1.5rem !important; - } - - .ps-xl-5 { - padding-left: 3rem !important; - } - - .text-xl-start { - text-align: left !important; - } - - .text-xl-end { - text-align: right !important; - } - - .text-xl-center { - text-align: center !important; - } -} -@media (min-width: 1400px) { - .float-xxl-start { - float: left !important; - } - - .float-xxl-end { - float: right !important; - } - - .float-xxl-none { - float: none !important; - } - - .d-xxl-inline { - display: inline !important; - } - - .d-xxl-inline-block { - display: inline-block !important; - } - - .d-xxl-block { - display: block !important; - } - - .d-xxl-grid { - display: grid !important; - } - - .d-xxl-table { - display: table !important; - } - - .d-xxl-table-row { - display: table-row !important; - } - - .d-xxl-table-cell { - display: table-cell !important; - } - - .d-xxl-flex { - display: flex !important; - } - - .d-xxl-inline-flex { - display: inline-flex !important; - } - - .d-xxl-none { - display: none !important; - } - - .flex-xxl-fill { - flex: 1 1 auto !important; - } - - .flex-xxl-row { - flex-direction: row !important; - } - - .flex-xxl-column { - flex-direction: column !important; - } - - .flex-xxl-row-reverse { - flex-direction: row-reverse !important; - } - - .flex-xxl-column-reverse { - flex-direction: column-reverse !important; - } - - .flex-xxl-grow-0 { - flex-grow: 0 !important; - } - - .flex-xxl-grow-1 { - flex-grow: 1 !important; - } - - .flex-xxl-shrink-0 { - flex-shrink: 0 !important; - } - - .flex-xxl-shrink-1 { - flex-shrink: 1 !important; - } - - .flex-xxl-wrap { - flex-wrap: wrap !important; - } - - .flex-xxl-nowrap { - flex-wrap: nowrap !important; 
- } - - .flex-xxl-wrap-reverse { - flex-wrap: wrap-reverse !important; - } - - .gap-xxl-0 { - gap: 0 !important; - } - - .gap-xxl-1 { - gap: 0.25rem !important; - } - - .gap-xxl-2 { - gap: 0.5rem !important; - } - - .gap-xxl-3 { - gap: 1rem !important; - } - - .gap-xxl-4 { - gap: 1.5rem !important; - } - - .gap-xxl-5 { - gap: 3rem !important; - } - - .justify-content-xxl-start { - justify-content: flex-start !important; - } - - .justify-content-xxl-end { - justify-content: flex-end !important; - } - - .justify-content-xxl-center { - justify-content: center !important; - } - - .justify-content-xxl-between { - justify-content: space-between !important; - } - - .justify-content-xxl-around { - justify-content: space-around !important; - } - - .justify-content-xxl-evenly { - justify-content: space-evenly !important; - } - - .align-items-xxl-start { - align-items: flex-start !important; - } - - .align-items-xxl-end { - align-items: flex-end !important; - } - - .align-items-xxl-center { - align-items: center !important; - } - - .align-items-xxl-baseline { - align-items: baseline !important; - } - - .align-items-xxl-stretch { - align-items: stretch !important; - } - - .align-content-xxl-start { - align-content: flex-start !important; - } - - .align-content-xxl-end { - align-content: flex-end !important; - } - - .align-content-xxl-center { - align-content: center !important; - } - - .align-content-xxl-between { - align-content: space-between !important; - } - - .align-content-xxl-around { - align-content: space-around !important; - } - - .align-content-xxl-stretch { - align-content: stretch !important; - } - - .align-self-xxl-auto { - align-self: auto !important; - } - - .align-self-xxl-start { - align-self: flex-start !important; - } - - .align-self-xxl-end { - align-self: flex-end !important; - } - - .align-self-xxl-center { - align-self: center !important; - } - - .align-self-xxl-baseline { - align-self: baseline !important; - } - - .align-self-xxl-stretch { - align-self: stretch !important; - } - - .order-xxl-first { - order: -1 !important; - } - - .order-xxl-0 { - order: 0 !important; - } - - .order-xxl-1 { - order: 1 !important; - } - - .order-xxl-2 { - order: 2 !important; - } - - .order-xxl-3 { - order: 3 !important; - } - - .order-xxl-4 { - order: 4 !important; - } - - .order-xxl-5 { - order: 5 !important; - } - - .order-xxl-last { - order: 6 !important; - } - - .m-xxl-0 { - margin: 0 !important; - } - - .m-xxl-1 { - margin: 0.25rem !important; - } - - .m-xxl-2 { - margin: 0.5rem !important; - } - - .m-xxl-3 { - margin: 1rem !important; - } - - .m-xxl-4 { - margin: 1.5rem !important; - } - - .m-xxl-5 { - margin: 3rem !important; - } - - .m-xxl-auto { - margin: auto !important; - } - - .mx-xxl-0 { - margin-right: 0 !important; - margin-left: 0 !important; - } - - .mx-xxl-1 { - margin-right: 0.25rem !important; - margin-left: 0.25rem !important; - } - - .mx-xxl-2 { - margin-right: 0.5rem !important; - margin-left: 0.5rem !important; - } - - .mx-xxl-3 { - margin-right: 1rem !important; - margin-left: 1rem !important; - } - - .mx-xxl-4 { - margin-right: 1.5rem !important; - margin-left: 1.5rem !important; - } - - .mx-xxl-5 { - margin-right: 3rem !important; - margin-left: 3rem !important; - } - - .mx-xxl-auto { - margin-right: auto !important; - margin-left: auto !important; - } - - .my-xxl-0 { - margin-top: 0 !important; - margin-bottom: 0 !important; - } - - .my-xxl-1 { - margin-top: 0.25rem !important; - margin-bottom: 0.25rem !important; - } - - .my-xxl-2 { - margin-top: 0.5rem !important; 
- margin-bottom: 0.5rem !important; - } - - .my-xxl-3 { - margin-top: 1rem !important; - margin-bottom: 1rem !important; - } - - .my-xxl-4 { - margin-top: 1.5rem !important; - margin-bottom: 1.5rem !important; - } - - .my-xxl-5 { - margin-top: 3rem !important; - margin-bottom: 3rem !important; - } - - .my-xxl-auto { - margin-top: auto !important; - margin-bottom: auto !important; - } - - .mt-xxl-0 { - margin-top: 0 !important; - } - - .mt-xxl-1 { - margin-top: 0.25rem !important; - } - - .mt-xxl-2 { - margin-top: 0.5rem !important; - } - - .mt-xxl-3 { - margin-top: 1rem !important; - } - - .mt-xxl-4 { - margin-top: 1.5rem !important; - } - - .mt-xxl-5 { - margin-top: 3rem !important; - } - - .mt-xxl-auto { - margin-top: auto !important; - } - - .me-xxl-0 { - margin-right: 0 !important; - } - - .me-xxl-1 { - margin-right: 0.25rem !important; - } - - .me-xxl-2 { - margin-right: 0.5rem !important; - } - - .me-xxl-3 { - margin-right: 1rem !important; - } - - .me-xxl-4 { - margin-right: 1.5rem !important; - } - - .me-xxl-5 { - margin-right: 3rem !important; - } - - .me-xxl-auto { - margin-right: auto !important; - } - - .mb-xxl-0 { - margin-bottom: 0 !important; - } - - .mb-xxl-1 { - margin-bottom: 0.25rem !important; - } - - .mb-xxl-2 { - margin-bottom: 0.5rem !important; - } - - .mb-xxl-3 { - margin-bottom: 1rem !important; - } - - .mb-xxl-4 { - margin-bottom: 1.5rem !important; - } - - .mb-xxl-5 { - margin-bottom: 3rem !important; - } - - .mb-xxl-auto { - margin-bottom: auto !important; - } - - .ms-xxl-0 { - margin-left: 0 !important; - } - - .ms-xxl-1 { - margin-left: 0.25rem !important; - } - - .ms-xxl-2 { - margin-left: 0.5rem !important; - } - - .ms-xxl-3 { - margin-left: 1rem !important; - } - - .ms-xxl-4 { - margin-left: 1.5rem !important; - } - - .ms-xxl-5 { - margin-left: 3rem !important; - } - - .ms-xxl-auto { - margin-left: auto !important; - } - - .p-xxl-0 { - padding: 0 !important; - } - - .p-xxl-1 { - padding: 0.25rem !important; - } - - .p-xxl-2 { - padding: 0.5rem !important; - } - - .p-xxl-3 { - padding: 1rem !important; - } - - .p-xxl-4 { - padding: 1.5rem !important; - } - - .p-xxl-5 { - padding: 3rem !important; - } - - .px-xxl-0 { - padding-right: 0 !important; - padding-left: 0 !important; - } - - .px-xxl-1 { - padding-right: 0.25rem !important; - padding-left: 0.25rem !important; - } - - .px-xxl-2 { - padding-right: 0.5rem !important; - padding-left: 0.5rem !important; - } - - .px-xxl-3 { - padding-right: 1rem !important; - padding-left: 1rem !important; - } - - .px-xxl-4 { - padding-right: 1.5rem !important; - padding-left: 1.5rem !important; - } - - .px-xxl-5 { - padding-right: 3rem !important; - padding-left: 3rem !important; - } - - .py-xxl-0 { - padding-top: 0 !important; - padding-bottom: 0 !important; - } - - .py-xxl-1 { - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; - } - - .py-xxl-2 { - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - } - - .py-xxl-3 { - padding-top: 1rem !important; - padding-bottom: 1rem !important; - } - - .py-xxl-4 { - padding-top: 1.5rem !important; - padding-bottom: 1.5rem !important; - } - - .py-xxl-5 { - padding-top: 3rem !important; - padding-bottom: 3rem !important; - } - - .pt-xxl-0 { - padding-top: 0 !important; - } - - .pt-xxl-1 { - padding-top: 0.25rem !important; - } - - .pt-xxl-2 { - padding-top: 0.5rem !important; - } - - .pt-xxl-3 { - padding-top: 1rem !important; - } - - .pt-xxl-4 { - padding-top: 1.5rem !important; - } - - .pt-xxl-5 { - padding-top: 3rem !important; - } - - 
.pe-xxl-0 { - padding-right: 0 !important; - } - - .pe-xxl-1 { - padding-right: 0.25rem !important; - } - - .pe-xxl-2 { - padding-right: 0.5rem !important; - } - - .pe-xxl-3 { - padding-right: 1rem !important; - } - - .pe-xxl-4 { - padding-right: 1.5rem !important; - } - - .pe-xxl-5 { - padding-right: 3rem !important; - } - - .pb-xxl-0 { - padding-bottom: 0 !important; - } - - .pb-xxl-1 { - padding-bottom: 0.25rem !important; - } - - .pb-xxl-2 { - padding-bottom: 0.5rem !important; - } - - .pb-xxl-3 { - padding-bottom: 1rem !important; - } - - .pb-xxl-4 { - padding-bottom: 1.5rem !important; - } - - .pb-xxl-5 { - padding-bottom: 3rem !important; - } - - .ps-xxl-0 { - padding-left: 0 !important; - } - - .ps-xxl-1 { - padding-left: 0.25rem !important; - } - - .ps-xxl-2 { - padding-left: 0.5rem !important; - } - - .ps-xxl-3 { - padding-left: 1rem !important; - } - - .ps-xxl-4 { - padding-left: 1.5rem !important; - } - - .ps-xxl-5 { - padding-left: 3rem !important; - } - - .text-xxl-start { - text-align: left !important; - } - - .text-xxl-end { - text-align: right !important; - } - - .text-xxl-center { - text-align: center !important; - } -} -@media (min-width: 1200px) { - .fs-1 { - font-size: 2.5rem !important; - } - - .fs-2 { - font-size: 2rem !important; - } - - .fs-3 { - font-size: 1.75rem !important; - } - - .fs-4 { - font-size: 1.5rem !important; - } -} -@media print { - .d-print-inline { - display: inline !important; - } - - .d-print-inline-block { - display: inline-block !important; - } - - .d-print-block { - display: block !important; - } - - .d-print-grid { - display: grid !important; - } - - .d-print-table { - display: table !important; - } - - .d-print-table-row { - display: table-row !important; - } - - .d-print-table-cell { - display: table-cell !important; - } - - .d-print-flex { - display: flex !important; - } - - .d-print-inline-flex { - display: inline-flex !important; - } - - .d-print-none { - display: none !important; - } -} - -/*# sourceMappingURL=bootstrap-utilities.css.map */ \ No newline at end of file diff --git a/spaces/TushDeMort/yolo/blur.py b/spaces/TushDeMort/yolo/blur.py deleted file mode 100644 index e8ec0384374bc765e88d8805a86c4b9432397653..0000000000000000000000000000000000000000 --- a/spaces/TushDeMort/yolo/blur.py +++ /dev/null @@ -1,18 +0,0 @@ -import numpy as np -import cv2 -import os -import base64 - -def blur_check(IMAGE_FILE): - - im_bytes = base64.b64decode(IMAGE_FILE) - im_arr = np.frombuffer(im_bytes, dtype=np.uint8) - img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR) - - grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - var = cv2.Laplacian(grey, cv2.CV_64F).var() - - if var < 120: - return 0 - else: - return 1 diff --git a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/__init__.py b/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/__init__.py deleted file mode 100644 index bb31f42f9107a0b748b878deb1c5768019d62b32..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import os -import sys - -from omegaconf import OmegaConf - -from minigpt4.common.registry import registry - -from minigpt4.datasets.builders import * -from minigpt4.models import * -from minigpt4.processors import * -from minigpt4.tasks import * - - -root_dir = os.path.dirname(os.path.abspath(__file__)) -default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) - -registry.register_path("library_root", root_dir) -repo_root = os.path.join(root_dir, "..") -registry.register_path("repo_root", repo_root) -cache_root = os.path.join(repo_root, default_cfg.env.cache_root) -registry.register_path("cache_root", cache_root) - -registry.register("MAX_INT", sys.maxsize) -registry.register("SPLIT_NAMES", ["train", "val", "test"]) diff --git a/spaces/Vrk/SkimLit/Dataset.py b/spaces/Vrk/SkimLit/Dataset.py deleted file mode 100644 index ef65df3a6d5f398f98d395853bab4d44b221f2e6..0000000000000000000000000000000000000000 --- a/spaces/Vrk/SkimLit/Dataset.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -import torch.nn as nn -from torch.utils.data import DataLoader, Dataset -import tensorflow as tf -import numpy as np - -def pad_sequences(sequences, max_seq_len=0): - """Pad sequences to max length in sequence.""" - max_seq_len = max(max_seq_len, max(len(sequence) for sequence in sequences)) - padded_sequences = np.zeros((len(sequences), max_seq_len)) - for i, sequence in enumerate(sequences): - padded_sequences[i][:len(sequence)] = sequence - return padded_sequences - -class SkimlitDataset(Dataset): - def __init__(self, text_seq, line_num, total_line): - self.text_seq = text_seq - self.line_num_one_hot = line_num - self.total_line_one_hot = total_line - - def __len__(self): - return len(self.text_seq) - - def __str__(self): - return f"" - - def __getitem__(self, index): - X = self.text_seq[index] - line_num = self.line_num_one_hot[index] - total_line = self.total_line_one_hot[index] - return [X, len(X), line_num, total_line] - - def collate_fn(self, batch): - """Processing on a batch""" - # Getting Input - batch = np.array(batch) - text_seq = batch[:,0] - seq_lens = batch[:, 1] - line_nums = batch[:, 2] - total_lines = batch[:, 3] - - # padding inputs - pad_text_seq = pad_sequences(sequences=text_seq) # max_seq_len=max_length - - # converting line nums into one-hot encoding - line_nums = tf.one_hot(line_nums, depth=20) - - # converting total lines into one-hot encoding - total_lines = tf.one_hot(total_lines, depth=24) - - # converting inputs to tensors - pad_text_seq = torch.LongTensor(pad_text_seq.astype(np.int32)) - seq_lens = torch.LongTensor(seq_lens.astype(np.int32)) - line_nums = torch.tensor(line_nums.numpy()) - total_lines = torch.tensor(total_lines.numpy()) - - return pad_text_seq, seq_lens, line_nums, total_lines - - def create_dataloader(self, batch_size, shuffle=False, drop_last=False): - dataloader = DataLoader(dataset=self, batch_size=batch_size, collate_fn=self.collate_fn, shuffle=shuffle, drop_last=drop_last, pin_memory=True) - return dataloader diff --git a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/libJPG/jpgd.h deleted file mode 100644 index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000 --- a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/cpp/libJPG/jpgd.h +++ 
/dev/null @@ -1,316 +0,0 @@ -// jpgd.h - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -#ifndef JPEG_DECODER_H -#define JPEG_DECODER_H - -#include -#include -#include - -namespace jpgd -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef unsigned short uint16; - typedef unsigned int uint; - typedef signed int int32; - - // Loads a JPEG image from a memory buffer or a file. - // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA). - // On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB). - // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly. - // Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp. -// BEGIN EPIC MOD -//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps); - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format); -// END EPIC MOD - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps); - - // Success/failure error codes. - enum jpgd_status - { - JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1, - JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE, - JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS, - JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH, - JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER, - JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS, - JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE, - JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR, - JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM - }; - - // Input stream interface. - // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available. - // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set. - // It the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer. - // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding. - class jpeg_decoder_stream - { - public: - jpeg_decoder_stream() { } - virtual ~jpeg_decoder_stream() { } - - // The read() method is called when the internal input buffer is empty. - // Parameters: - // pBuf - input buffer - // max_bytes_to_read - maximum bytes that can be written to pBuf - // pEOF_flag - set this to true if at end of stream (no more bytes remaining) - // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0). - // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full. 
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0; - }; - - // stdio FILE stream class. - class jpeg_decoder_file_stream : public jpeg_decoder_stream - { - jpeg_decoder_file_stream(const jpeg_decoder_file_stream &); - jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &); - - FILE *m_pFile; - bool m_eof_flag, m_error_flag; - - public: - jpeg_decoder_file_stream(); - virtual ~jpeg_decoder_file_stream(); - - bool open(const char *Pfilename); - void close(); - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Memory stream class. - class jpeg_decoder_mem_stream : public jpeg_decoder_stream - { - const uint8 *m_pSrc_data; - uint m_ofs, m_size; - - public: - jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { } - jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { } - - virtual ~jpeg_decoder_mem_stream() { } - - bool open(const uint8 *pSrc_data, uint size); - void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; } - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Loads JPEG file from a jpeg_decoder_stream. - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps); - - enum - { - JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4, - JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384 - }; - - typedef int16 jpgd_quant_t; - typedef int16 jpgd_block_t; - - class jpeg_decoder - { - public: - // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc. - // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline. - jpeg_decoder(jpeg_decoder_stream *pStream); - - ~jpeg_decoder(); - - // Call this method after constructing the object to begin decompression. - // If JPGD_SUCCESS is returned you may then call decode() on each scanline. - int begin_decoding(); - - // Returns the next scan line. - // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1). - // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4). - // Returns JPGD_SUCCESS if a scan line has been returned. - // Returns JPGD_DONE if all scan lines have been returned. - // Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info. - int decode(const void** pScan_line, uint* pScan_line_len); - - inline jpgd_status get_error_code() const { return m_error_code; } - - inline int get_width() const { return m_image_x_size; } - inline int get_height() const { return m_image_y_size; } - - inline int get_num_components() const { return m_comps_in_frame; } - - inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; } - inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); } - - // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file). 
- inline int get_total_bytes_read() const { return m_total_bytes_read; } - - private: - jpeg_decoder(const jpeg_decoder &); - jpeg_decoder &operator =(const jpeg_decoder &); - - typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int); - - struct huff_tables - { - bool ac_table; - uint look_up[256]; - uint look_up2[256]; - uint8 code_size[256]; - uint tree[512]; - }; - - struct coeff_buf - { - uint8 *pData; - int block_num_x, block_num_y; - int block_len_x, block_len_y; - int block_size; - }; - - struct mem_block - { - mem_block *m_pNext; - size_t m_used_count; - size_t m_size; - char m_data[1]; - }; - - jmp_buf m_jmp_state; - mem_block *m_pMem_blocks; - int m_image_x_size; - int m_image_y_size; - jpeg_decoder_stream *m_pStream; - int m_progressive_flag; - uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES]; - uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size - uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size - jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables - int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported) - int m_comps_in_frame; // # of components in frame - int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor - int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor - int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector - int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID - int m_comp_h_blocks[JPGD_MAX_COMPONENTS]; - int m_comp_v_blocks[JPGD_MAX_COMPONENTS]; - int m_comps_in_scan; // # of components in scan - int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan - int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector - int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector - int m_spectral_start; // spectral selection start - int m_spectral_end; // spectral selection end - int m_successive_low; // successive approximation low - int m_successive_high; // successive approximation high - int m_max_mcu_x_size; // MCU's max. X size in pixels - int m_max_mcu_y_size; // MCU's max. 
Y size in pixels - int m_blocks_per_mcu; - int m_max_blocks_per_row; - int m_mcus_per_row, m_mcus_per_col; - int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU]; - int m_total_lines_left; // total # lines left in image - int m_mcu_lines_left; // total # lines left in this MCU - int m_real_dest_bytes_per_scan_line; - int m_dest_bytes_per_scan_line; // rounded up - int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y) - huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES]; - coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS]; - coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS]; - int m_eob_run; - int m_block_y_mcu[JPGD_MAX_COMPONENTS]; - uint8* m_pIn_buf_ofs; - int m_in_buf_left; - int m_tem_flag; - bool m_eof_flag; - uint8 m_in_buf_pad_start[128]; - uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128]; - uint8 m_in_buf_pad_end[128]; - int m_bits_left; - uint m_bit_buf; - int m_restart_interval; - int m_restarts_left; - int m_next_restart_num; - int m_max_mcus_per_row; - int m_max_blocks_per_mcu; - int m_expanded_blocks_per_mcu; - int m_expanded_blocks_per_row; - int m_expanded_blocks_per_component; - bool m_freq_domain_chroma_upsample; - int m_max_mcus_per_col; - uint m_last_dc_val[JPGD_MAX_COMPONENTS]; - jpgd_block_t* m_pMCU_coefficients; - int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU]; - uint8* m_pSample_buf; - int m_crr[256]; - int m_cbb[256]; - int m_crg[256]; - int m_cbg[256]; - uint8* m_pScan_line_0; - uint8* m_pScan_line_1; - jpgd_status m_error_code; - bool m_ready_flag; - int m_total_bytes_read; - - void free_all_blocks(); - // BEGIN EPIC MOD - UE_NORETURN void stop_decoding(jpgd_status status); - // END EPIC MOD - void *alloc(size_t n, bool zero = false); - void word_clear(void *p, uint16 c, uint n); - void prep_in_buffer(); - void read_dht_marker(); - void read_dqt_marker(); - void read_sof_marker(); - void skip_variable_marker(); - void read_dri_marker(); - void read_sos_marker(); - int next_marker(); - int process_markers(); - void locate_soi_marker(); - void locate_sof_marker(); - int locate_sos_marker(); - void init(jpeg_decoder_stream * pStream); - void create_look_ups(); - void fix_in_buffer(); - void transform_mcu(int mcu_row); - void transform_mcu_expand(int mcu_row); - coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y); - inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y); - void load_next_row(); - void decode_next_row(); - void make_huff_table(int index, huff_tables *pH); - void check_quant_tables(); - void check_huff_tables(); - void calc_mcu_block_order(); - int init_scan(); - void init_frame(); - void process_restart(); - void decode_scan(pDecode_block_func decode_block_func); - void init_progressive(); - void init_sequential(); - void decode_start(); - void decode_init(jpeg_decoder_stream * pStream); - void H2V2Convert(); - void H2V1Convert(); - void H1V2Convert(); - void H1V1Convert(); - void gray_convert(); - void expanded_convert(); - void find_eoi(); - inline uint get_char(); - inline uint get_char(bool *pPadding_flag); - inline void stuff_char(uint8 q); - inline uint8 get_octet(); - inline uint get_bits(int num_bits); - inline uint get_bits_no_markers(int numbits); - inline int huff_decode(huff_tables *pH); - inline int huff_decode(huff_tables *pH, int& extrabits); - static inline uint8 clamp(int i); - static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void 
decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - }; - -} // namespace jpgd - -#endif // JPEG_DECODER_H diff --git a/spaces/Wing0820/Real-CUGAN/README.md b/spaces/Wing0820/Real-CUGAN/README.md deleted file mode 100644 index d673114edadba73e80f33a3c71bc0dbee8758cc8..0000000000000000000000000000000000000000 --- a/spaces/Wing0820/Real-CUGAN/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Real CUGAN -emoji: 🐢 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: DianXian/Real-CUGAN ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Xenova/doodle-dash/assets/worker-6c635e10.js b/spaces/Xenova/doodle-dash/assets/worker-6c635e10.js deleted file mode 100644 index 848c792e8f84107a7500fb6278c36e155a6d3cad..0000000000000000000000000000000000000000 --- a/spaces/Xenova/doodle-dash/assets/worker-6c635e10.js +++ /dev/null @@ -1,1790 +0,0 @@ -var fn=Object.defineProperty;var gn=(nt,y,n)=>y in nt?fn(nt,y,{enumerable:!0,configurable:!0,writable:!0,value:n}):nt[y]=n;var Se=(nt,y,n)=>(gn(nt,typeof y!="symbol"?y+"":y,n),n);(function(){var nt;"use strict";function _mergeNamespaces(y,n){return n.forEach(function(a){a&&typeof a!="string"&&!Array.isArray(a)&&Object.keys(a).forEach(function(u){if(u!=="default"&&!(u in y)){var c=Object.getOwnPropertyDescriptor(a,u);Object.defineProperty(y,u,c.get?c:{enumerable:!0,get:function(){return a[u]}})}})}),Object.freeze(y)}function mobileTabletCheck(){let y=!1;return function(n){(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino|android|ipad|playbook|silk/i.test(n)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw-(n|u)|c55\/|capi|ccwa|cdm-|cell|chtm|cldc|cmd-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc-s|devi|dica|dmob|do(c|p)o|ds(12|-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(-|_)|g1 u|g560|gene|gf-5|g-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd-(m|p|t)|hei-|hi(pt|ta)|hp( i|ip)|hs-c|ht(c(-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i-(20|go|ma)|i230|iac( |-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|-[a-w])|libw|lynx|m1-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|-([1-8]|c))|phil|pire|pl(ay|uc)|pn-2|po(ck|rt|se)|prox|psio|pt-g|qa-a|qc(07|12|21|32|60|-[2-7]|i-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h-|oo|p-)|sdk\/|se(c(-|0|1)|47|mc|nd|ri)|sgh-|shar|sie(-|m)|sk-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h-|v-|v 
)|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl-|tdg-|tel(i|m)|tim-|t-mo|to(pl|sh)|ts(70|m-|m3|m5)|tx-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas-|your|zeto|zte-/i.test(n.substr(0,4)))&&(y=!0)}(navigator.userAgent||navigator.vendor||("opera"in window&&typeof window.opera=="string"?window.opera:"")),y}const IS_MOBILE=mobileTabletCheck();var constants={DEFAULT_MODEL:"quickdraw-mobilevit-small",DEFAULT_QUANTIZED:!1,BANNED_LABELS:["animal migration","arm","barn","bat","circle","hexagon","stitches","sweather","van"],PREDICTION_REFRESH_TIME:10,BRUSH_SIZE:IS_MOBILE?12:16,TARGET_FPS:60,GAME_DURATION:1e3+.5,COUNTDOWN_TIMER:3,EASY_MODE_THRESHOLD:.2,EASY_MODE_DURATION:120*1e3,EASY_MODE_DELAY:3*1e3,SKIP_PENALTY:3*1e3,LABELS:{0:"aircraft carrier",1:"airplane",2:"alarm clock",3:"ambulance",4:"angel",5:"animal migration",6:"ant",7:"anvil",8:"apple",9:"arm",10:"asparagus",11:"axe",12:"backpack",13:"banana",14:"bandage",15:"barn",16:"baseball bat",17:"baseball",18:"basket",19:"basketball",20:"bat",21:"bathtub",22:"beach",23:"bear",24:"beard",25:"bed",26:"bee",27:"belt",28:"bench",29:"bicycle",30:"binoculars",31:"bird",32:"birthday cake",33:"blackberry",34:"blueberry",35:"book",36:"boomerang",37:"bottlecap",38:"bowtie",39:"bracelet",40:"brain",41:"bread",42:"bridge",43:"broccoli",44:"broom",45:"bucket",46:"bulldozer",47:"bus",48:"bush",49:"butterfly",50:"cactus",51:"cake",52:"calculator",53:"calendar",54:"camel",55:"camera",56:"camouflage",57:"campfire",58:"candle",59:"cannon",60:"canoe",61:"car",62:"carrot",63:"castle",64:"cat",65:"ceiling fan",66:"cell phone",67:"cello",68:"chair",69:"chandelier",70:"church",71:"circle",72:"clarinet",73:"clock",74:"cloud",75:"coffee cup",76:"compass",77:"computer",78:"cookie",79:"cooler",80:"couch",81:"cow",82:"crab",83:"crayon",84:"crocodile",85:"crown",86:"cruise ship",87:"cup",88:"diamond",89:"dishwasher",90:"diving board",91:"dog",92:"dolphin",93:"donut",94:"door",95:"dragon",96:"dresser",97:"drill",98:"drums",99:"duck",100:"dumbbell",101:"ear",102:"elbow",103:"elephant",104:"envelope",105:"eraser",106:"eye",107:"eyeglasses",108:"face",109:"fan",110:"feather",111:"fence",112:"finger",113:"fire hydrant",114:"fireplace",115:"firetruck",116:"fish",117:"flamingo",118:"flashlight",119:"flip flops",120:"floor lamp",121:"flower",122:"flying saucer",123:"foot",124:"fork",125:"frog",126:"frying pan",127:"garden hose",128:"garden",129:"giraffe",130:"goatee",131:"golf club",132:"grapes",133:"grass",134:"guitar",135:"hamburger",136:"hammer",137:"hand",138:"harp",139:"hat",140:"headphones",141:"hedgehog",142:"helicopter",143:"helmet",144:"hexagon",145:"hockey puck",146:"hockey stick",147:"horse",148:"hospital",149:"hot air balloon",150:"hot dog",151:"hot tub",152:"hourglass",153:"house plant",154:"house",155:"hurricane",156:"ice cream",157:"jacket",158:"jail",159:"kangaroo",160:"key",161:"keyboard",162:"knee",163:"knife",164:"ladder",165:"lantern",166:"laptop",167:"leaf",168:"leg",169:"light 
bulb",170:"lighter",171:"lighthouse",172:"lightning",173:"line",174:"lion",175:"lipstick",176:"lobster",177:"lollipop",178:"mailbox",179:"map",180:"marker",181:"matches",182:"megaphone",183:"mermaid",184:"microphone",185:"microwave",186:"monkey",187:"moon",188:"mosquito",189:"motorbike",190:"mountain",191:"mouse",192:"moustache",193:"mouth",194:"mug",195:"mushroom",196:"nail",197:"necklace",198:"nose",199:"ocean",200:"octagon",201:"octopus",202:"onion",203:"oven",204:"owl",205:"paint can",206:"paintbrush",207:"palm tree",208:"panda",209:"pants",210:"paper clip",211:"parachute",212:"parrot",213:"passport",214:"peanut",215:"pear",216:"peas",217:"pencil",218:"penguin",219:"piano",220:"pickup truck",221:"picture frame",222:"pig",223:"pillow",224:"pineapple",225:"pizza",226:"pliers",227:"police car",228:"pond",229:"pool",230:"popsicle",231:"postcard",232:"potato",233:"power outlet",234:"purse",235:"rabbit",236:"raccoon",237:"radio",238:"rain",239:"rainbow",240:"rake",241:"remote control",242:"rhinoceros",243:"rifle",244:"river",245:"roller coaster",246:"rollerskates",247:"sailboat",248:"sandwich",249:"saw",250:"saxophone",251:"school bus",252:"scissors",253:"scorpion",254:"screwdriver",255:"sea turtle",256:"see saw",257:"shark",258:"sheep",259:"shoe",260:"shorts",261:"shovel",262:"sink",263:"skateboard",264:"skull",265:"skyscraper",266:"sleeping bag",267:"smiley face",268:"snail",269:"snake",270:"snorkel",271:"snowflake",272:"snowman",273:"soccer ball",274:"sock",275:"speedboat",276:"spider",277:"spoon",278:"spreadsheet",279:"square",280:"squiggle",281:"squirrel",282:"stairs",283:"star",284:"steak",285:"stereo",286:"stethoscope",287:"stitches",288:"stop sign",289:"stove",290:"strawberry",291:"streetlight",292:"string bean",293:"submarine",294:"suitcase",295:"sun",296:"swan",297:"sweater",298:"swing set",299:"sword",300:"syringe",301:"t-shirt",302:"table",303:"teapot",304:"teddy-bear",305:"telephone",306:"television",307:"tennis racquet",308:"tent",309:"The Eiffel Tower",310:"The Great Wall of China",311:"The Mona Lisa",312:"tiger",313:"toaster",314:"toe",315:"toilet",316:"tooth",317:"toothbrush",318:"toothpaste",319:"tornado",320:"tractor",321:"traffic light",322:"train",323:"tree",324:"triangle",325:"trombone",326:"truck",327:"trumpet",328:"umbrella",329:"underwear",330:"van",331:"vase",332:"violin",333:"washing machine",334:"watermelon",335:"waterslide",336:"whale",337:"wheel",338:"windmill",339:"wine bottle",340:"wine glass",341:"wristwatch",342:"yoga",343:"zebra",344:"zigzag"}};function dispatchCallback(y,n){y!==null&&y(n)}function reverseDictionary(y){return Object.fromEntries(Object.entries(y).map(([n,a])=>[a,n]))}function escapeRegExp(y){return y.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}const Callable=class{constructor(){let y=function(...n){return y._call(...n)};return Object.setPrototypeOf(y,new.target.prototype)}_call(...y){throw Error("Must implement _call method in subclass")}};function isString(y){return typeof y=="string"||y instanceof String}function isTypedArray(y){var n,a,u;return((u=(a=(n=y==null?void 0:y.prototype)==null?void 0:n.__proto__)==null?void 0:a.constructor)==null?void 0:u.name)==="TypedArray"}function isIntegralNumber(y){return Number.isInteger(y)||typeof y=="bigint"}function exists(y){return y!=null}function calculateDimensions(y){const n=[];let a=y;for(;Array.isArray(a);)n.push(a.length),a=a[0];return n}function pop(y,n,a=void 0){const u=y[n];if(u!==void 0)return delete y[n],u;if(a===void 0)throw Error(`Key ${n} does not exist in object.`);return a}function 
mergeArrays(...y){return Array.prototype.concat.apply([],y)}var fs={},ONNX_NODE=Object.freeze({__proto__:null,default:fs});function getDefaultExportFromCjs(y){return y&&y.__esModule&&Object.prototype.hasOwnProperty.call(y,"default")?y.default:y}function getAugmentedNamespace(y){if(y.__esModule)return y;var n=y.default;if(typeof n=="function"){var a=function u(){if(this instanceof u){var c=[null];c.push.apply(c,arguments);var p=Function.bind.apply(n,c);return new p}return n.apply(this,arguments)};a.prototype=n.prototype}else a={};return Object.defineProperty(a,"__esModule",{value:!0}),Object.keys(y).forEach(function(u){var c=Object.getOwnPropertyDescriptor(y,u);Object.defineProperty(a,u,c.get?c:{enumerable:!0,get:function(){return y[u]}})}),a}var ortWeb_min$1={exports:{}};const backends={},backendsSortedByPriority=[],registerBackend=(y,n,a)=>{if(n&&typeof n.init=="function"&&typeof n.createSessionHandler=="function"){const u=backends[y];if(u===void 0)backends[y]={backend:n,priority:a};else{if(u.priority>a)return;if(u.priority===a&&u.backend!==n)throw new Error(`cannot register backend "${y}" using priority ${a}`)}if(a>=0){const c=backendsSortedByPriority.indexOf(y);c!==-1&&backendsSortedByPriority.splice(c,1);for(let p=0;p{const n=y.length===0?backendsSortedByPriority:y,a=[];for(const u of n){const c=backends[u];if(c){if(c.initialized)return c.backend;if(c.aborted)continue;const p=!!c.initPromise;try{return p||(c.initPromise=c.backend.init()),await c.initPromise,c.initialized=!0,c.backend}catch(s){p||a.push({name:u,err:s}),c.aborted=!0}finally{delete c.initPromise}}}throw new Error(`no available backend found. ERR: ${a.map(u=>`[${u.name}] ${u.err}`).join(", ")}`)};class EnvImpl{constructor(){this.wasm={},this.webgl={},this.logLevelInternal="warning"}set logLevel(n){if(n!==void 0){if(typeof n!="string"||["verbose","info","warning","error","fatal"].indexOf(n)===-1)throw new Error(`Unsupported logging level: ${n}`);this.logLevelInternal=n}}get logLevel(){return this.logLevelInternal}}const env$1=new EnvImpl,isBigInt64ArrayAvailable=typeof BigInt64Array<"u"&&typeof BigInt64Array.from=="function",isBigUint64ArrayAvailable=typeof BigUint64Array<"u"&&typeof BigUint64Array.from=="function",NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP=new Map([["float32",Float32Array],["uint8",Uint8Array],["int8",Int8Array],["uint16",Uint16Array],["int16",Int16Array],["int32",Int32Array],["bool",Uint8Array],["float64",Float64Array],["uint32",Uint32Array]]),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP=new Map([[Float32Array,"float32"],[Uint8Array,"uint8"],[Int8Array,"int8"],[Uint16Array,"uint16"],[Int16Array,"int16"],[Int32Array,"int32"],[Float64Array,"float64"],[Uint32Array,"uint32"]]);isBigInt64ArrayAvailable&&(NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("int64",BigInt64Array),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigInt64Array,"int64")),isBigUint64ArrayAvailable&&(NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("uint64",BigUint64Array),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigUint64Array,"uint64"));const calculateSize=y=>{let n=1;for(let a=0;a{const t=document.createElement("canvas"),e=t.getContext("2d");if(!n||!e)return o();const r=new Image;r.crossOrigin="Anonymous",r.src=n,r.onload=()=>{t.width=r.width,t.height=r.height,e.drawImage(r,0,0,t.width,t.height);const i=e.getImageData(0,0,t.width,t.height);if(a!==void 0){if(a.height!==void 0&&a.height!==t.height)throw new Error("Image input config height doesn't match ImageBitmap height");if(f.height=t.height,a.width!==void 0&&a.width!==t.width)throw new Error("Image input config 
width doesn't match ImageBitmap width");f.width=t.width}else f.height=t.height,f.width=t.width;l(ut.bufferToTensor(i.data,f))}});throw new Error("Input data provided is not supported - aborted tensor creation")}if(h!==void 0)return ut.bufferToTensor(h,f);throw new Error("Input data provided is not supported - aborted tensor creation")}toImageData(n){var a,u;const c=document.createElement("canvas").getContext("2d");let p;if(c!=null){const s=this.dims[3],h=this.dims[2],f=this.dims[1],l=n!==void 0&&n.format!==void 0?n.format:"RGB",o=n!==void 0&&((a=n.norm)===null||a===void 0?void 0:a.mean)!==void 0?n.norm.mean:255,t=n!==void 0&&((u=n.norm)===null||u===void 0?void 0:u.bias)!==void 0?n.norm.bias:0,e=h*s;if(n!==void 0){if(n.height!==void 0&&n.height!==h)throw new Error("Image output config height doesn't match tensor height");if(n.width!==void 0&&n.width!==s)throw new Error("Image output config width doesn't match tensor width");if(n.format!==void 0&&f===4&&n.format!=="RGBA"||f===3&&n.format!=="RGB"&&n.format!=="BGR")throw new Error("Tensor format doesn't match input tensor dims")}const r=4;let i=0,d=1,g=2,m=3,b=0,_=e,v=e*2,w=-1;l==="RGBA"?(b=0,_=e,v=e*2,w=e*3):l==="RGB"?(b=0,_=e,v=e*2):l==="RBG"&&(b=0,v=e,_=e*2),p=c.createImageData(s,h);for(let S=0;S"u")throw new Error(`input '${l}' is missing in 'feeds'.`);if(s)for(const l of this.outputNames)c[l]=null;const h=await this.handler.run(n,c,p),f={};for(const l in h)Object.hasOwnProperty.call(h,l)&&(f[l]=new Tensor$1(h[l].type,h[l].data,h[l].dims));return f}static async create(n,a,u,c){let p,s={};if(typeof n=="string"){if(p=n,typeof a=="object"&&a!==null)s=a;else if(typeof a<"u")throw new TypeError("'options' must be an object.")}else if(n instanceof Uint8Array){if(p=n,typeof a=="object"&&a!==null)s=a;else if(typeof a<"u")throw new TypeError("'options' must be an object.")}else if(n instanceof ArrayBuffer||typeof SharedArrayBuffer<"u"&&n instanceof SharedArrayBuffer){const t=n;let e=0,r=n.byteLength;if(typeof a=="object"&&a!==null)s=a;else if(typeof a=="number"){if(e=a,!Number.isSafeInteger(e))throw new RangeError("'byteOffset' must be an integer.");if(e<0||e>=t.byteLength)throw new RangeError(`'byteOffset' is out of range [0, ${t.byteLength}).`);if(r=n.byteLength-e,typeof u=="number"){if(r=u,!Number.isSafeInteger(r))throw new RangeError("'byteLength' must be an integer.");if(r<=0||e+r>t.byteLength)throw new RangeError(`'byteLength' is out of range (0, ${t.byteLength-e}].`);if(typeof c=="object"&&c!==null)s=c;else if(typeof c<"u")throw new TypeError("'options' must be an object.")}else if(typeof u<"u")throw new TypeError("'byteLength' must be a number.")}else if(typeof a<"u")throw new TypeError("'options' must be an object.");p=new Uint8Array(t,e,r)}else throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'.");const f=(s.executionProviders||[]).map(t=>typeof t=="string"?t:t.name),o=await(await resolveBackend(f)).createSessionHandler(p,s);return new dn(o)}startProfiling(){this.handler.startProfiling()}endProfiling(){this.handler.endProfiling()}get inputNames(){return this.handler.inputNames}get outputNames(){return this.handler.outputNames}};const InferenceSession$1=InferenceSession$2;var lib=Object.freeze({__proto__:null,InferenceSession:InferenceSession$1,Tensor:Tensor$1,env:env$1,registerBackend}),require$$0=getAugmentedNamespace(lib);/*! -* ONNX Runtime Web v1.14.0 -* Copyright (c) Microsoft Corporation. All rights reserved. -* Licensed under the MIT License. 
-*/(function(module,exports){(function(y,n){module.exports=n(require$$0)})(self,__WEBPACK_EXTERNAL_MODULE__1670__=>(()=>{var __webpack_modules__={3474:(y,n,a)=>{var u,c=(u=(u=typeof document<"u"&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(p){function s(){return X.buffer!=ee&&Ee(X.buffer),ue}function h(){return X.buffer!=ee&&Ee(X.buffer),Ae}function f(){return X.buffer!=ee&&Ee(X.buffer),ve}function l(){return X.buffer!=ee&&Ee(X.buffer),oe}function o(){return X.buffer!=ee&&Ee(X.buffer),_e}var t,e,r;p=p||{},t||(t=p!==void 0?p:{}),t.ready=new Promise(function(T,E){e=T,r=E});var i,d,g,m,b,_,v=Object.assign({},t),w="./this.program",S=(T,E)=>{throw E},A=typeof window=="object",O=typeof importScripts=="function",x=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string",I=t.ENVIRONMENT_IS_PTHREAD||!1,N="";function B(T){return t.locateFile?t.locateFile(T,N):N+T}if(x){let T;N=O?a(908).dirname(N)+"/":"//",_=()=>{b||(m=a(1384),b=a(908))},i=function(E,k){return _(),E=b.normalize(E),m.readFileSync(E,k?void 0:"utf8")},g=E=>((E=i(E,!0)).buffer||(E=new Uint8Array(E)),E),d=(E,k,C)=>{_(),E=b.normalize(E),m.readFile(E,function(z,G){z?C(z):k(G.buffer)})},1{if(qe())throw process.exitCode=E,k;k instanceof Qe||j("exiting due to exception: "+k),process.exit(E)},t.inspect=function(){return"[Emscripten Module object]"};try{T=a(9925)}catch(E){throw console.error('The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?'),E}a.g.Worker=T.Worker}else(A||O)&&(O?N=self.location.href:typeof document<"u"&&document.currentScript&&(N=document.currentScript.src),u&&(N=u),N=N.indexOf("blob:")!==0?N.substr(0,N.replace(/[?#].*/,"").lastIndexOf("/")+1):"",x||(i=T=>{var E=new XMLHttpRequest;return E.open("GET",T,!1),E.send(null),E.responseText},O&&(g=T=>{var E=new XMLHttpRequest;return E.open("GET",T,!1),E.responseType="arraybuffer",E.send(null),new Uint8Array(E.response)}),d=(T,E,k)=>{var C=new XMLHttpRequest;C.open("GET",T,!0),C.responseType="arraybuffer",C.onload=()=>{C.status==200||C.status==0&&C.response?E(C.response):k()},C.onerror=k,C.send(null)}));x&&typeof performance>"u"&&(a.g.performance=a(6953).performance);var L=console.log.bind(console),F=console.warn.bind(console);x&&(_(),L=T=>m.writeSync(1,T+` -`),F=T=>m.writeSync(2,T+` -`));var H,D=t.print||L,j=t.printErr||F;Object.assign(t,v),v=null,t.thisProgram&&(w=t.thisProgram),t.quit&&(S=t.quit),t.wasmBinary&&(H=t.wasmBinary);var Z=t.noExitRuntime||!1;typeof WebAssembly!="object"&&fe("no native wasm support detected");var X,J,ee,ue,Ae,ve,oe,_e,be=!1,ke=typeof TextDecoder<"u"?new TextDecoder("utf8"):void 0;function Fe(T,E,k){var C=(E>>>=0)+k;for(k=E;T[k]&&!(k>=C);)++k;if(16(z=(240&z)==224?(15&z)<<12|G<<6|K:(7&z)<<18|G<<12|K<<6|63&T[E++])?C+=String.fromCharCode(z):(z-=65536,C+=String.fromCharCode(55296|z>>10,56320|1023&z))}}else C+=String.fromCharCode(z)}return C}function xe(T,E){return(T>>>=0)?Fe(h(),T,E):""}function Ne(T,E,k,C){if(!(0>>=0;C=k+C-1;for(var G=0;G=K&&(K=65536+((1023&K)<<10)|1023&T.charCodeAt(++G)),127>=K){if(k>=C)break;E[k++>>>0]=K}else{if(2047>=K){if(k+1>=C)break;E[k++>>>0]=192|K>>6}else{if(65535>=K){if(k+2>=C)break;E[k++>>>0]=224|K>>12}else{if(k+3>=C)break;E[k++>>>0]=240|K>>18,E[k++>>>0]=128|K>>12&63}E[k++>>>0]=128|K>>6&63}E[k++>>>0]=128|63&K}}return E[k>>>0]=0,k-z}function Ce(T){for(var E=0,k=0;k=C?E++:2047>=C?E+=2:55296<=C&&57343>=C?(E+=4,++k):E+=3}return E}function Ee(T){ee=T,t.HEAP8=ue=new Int8Array(T),t.HEAP16=new 
Int16Array(T),t.HEAP32=ve=new Int32Array(T),t.HEAPU8=Ae=new Uint8Array(T),t.HEAPU16=new Uint16Array(T),t.HEAPU32=oe=new Uint32Array(T),t.HEAPF32=new Float32Array(T),t.HEAPF64=_e=new Float64Array(T)}I&&(ee=t.buffer);var Oe=t.INITIAL_MEMORY||16777216;if(I)X=t.wasmMemory,ee=t.buffer;else if(t.wasmMemory)X=t.wasmMemory;else if(!((X=new WebAssembly.Memory({initial:Oe/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw j("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),x&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");X&&(ee=X.buffer),Oe=ee.byteLength,Ee(ee);var Be,Ge=[],Ve=[],Xe=[],Ze=[];function qe(){return Z||!1}function Ue(){var T=t.preRun.shift();Ge.unshift(T)}var Ie,je=0,Ye=null;function fe(T){throw I?postMessage({cmd:"onAbort",arg:T}):t.onAbort&&t.onAbort(T),j(T="Aborted("+T+")"),be=!0,T=new WebAssembly.RuntimeError(T+". Build with -sASSERTIONS for more info."),r(T),T}function pt(){return Ie.startsWith("data:application/octet-stream;base64,")}function lt(){var T=Ie;try{if(T==Ie&&H)return new Uint8Array(H);if(g)return g(T);throw"both async and sync fetching of the wasm failed"}catch(E){fe(E)}}Ie="ort-wasm-threaded.wasm",pt()||(Ie=B(Ie));var Pt={};function Qe(T){this.name="ExitStatus",this.message="Program terminated with exit("+T+")",this.status=T}function ct(T){(T=re.Vb[T])||fe(),re.mc(T)}function dt(T){var E=re.Cc();if(!E)return 6;re.ac.push(E),re.Vb[T.Ub]=E,E.Ub=T.Ub;var k={cmd:"run",start_routine:T.Ic,arg:T.zc,pthread_ptr:T.Ub};return E.$b=()=>{k.time=performance.now(),E.postMessage(k,T.Nc)},E.loaded&&(E.$b(),delete E.$b),0}function Re(T){if(I)return Q(1,1,T);qe()||(re.oc(),t.onExit&&t.onExit(T),be=!0),S(T,new Qe(T))}function it(T,E){if(!E&&I)throw kt(T),"unwind";qe()||I||(Wt(),rt(Xe),qt(0),$t[1].length&&Ft(1,10),$t[2].length&&Ft(2,10),re.oc()),Re(T)}var re={Yb:[],ac:[],qc:[],Vb:{},fc:function(){I&&re.Ec()},Pc:function(){},Ec:function(){re.receiveObjectTransfer=re.Gc,re.threadInitTLS=re.pc,re.setExitStatus=re.nc,Z=!1},nc:function(){},oc:function(){for(var T of Object.values(re.Vb))re.mc(T);for(T of re.Yb)T.terminate();re.Yb=[]},mc:function(T){var E=T.Ub;delete re.Vb[E],re.Yb.push(T),re.ac.splice(re.ac.indexOf(T),1),T.Ub=0,Rt(E)},Gc:function(){},pc:function(){re.qc.forEach(T=>T())},Fc:function(T,E){T.onmessage=k=>{var C=(k=k.data).cmd;if(T.Ub&&(re.Bc=T.Ub),k.targetThread&&k.targetThread!=Dt()){var z=re.Vb[k.Qc];z?z.postMessage(k,k.transferList):j('Internal error! Worker sent a message "'+C+'" to target pthread '+k.targetThread+", but that thread no longer exists!")}else C==="processProxyingQueue"?$(k.queue):C==="spawnThread"?dt(k):C==="cleanupThread"?ct(k.thread):C==="killThread"?(k=k.thread,C=re.Vb[k],delete re.Vb[k],C.terminate(),Rt(k),re.ac.splice(re.ac.indexOf(C),1),C.Ub=0):C==="cancelThread"?re.Vb[k.thread].postMessage({cmd:"cancel"}):C==="loaded"?(T.loaded=!0,E&&E(T),T.$b&&(T.$b(),delete T.$b)):C==="print"?D("Thread "+k.threadId+": "+k.text):C==="printErr"?j("Thread "+k.threadId+": "+k.text):C==="alert"?alert("Thread "+k.threadId+": "+k.text):k.target==="setimmediate"?T.postMessage(k):C==="onAbort"?t.onAbort&&t.onAbort(k.arg):C&&j("worker sent an unknown command "+C);re.Bc=void 0},T.onerror=k=>{throw j("worker sent an error! 
"+k.filename+":"+k.lineno+": "+k.message),k},x&&(T.on("message",function(k){T.onmessage({data:k})}),T.on("error",function(k){T.onerror(k)}),T.on("detachedExit",function(){})),T.postMessage({cmd:"load",urlOrBlob:t.mainScriptUrlOrBlob||u,wasmMemory:X,wasmModule:J})},yc:function(){var T=B("ort-wasm-threaded.worker.js");re.Yb.push(new Worker(T))},Cc:function(){return re.Yb.length==0&&(re.yc(),re.Fc(re.Yb[0])),re.Yb.pop()}};function rt(T){for(;0>2>>>0];T=f()[T+48>>2>>>0],Zt(E,E-T),ce(E)};var Je=[];function we(T){var E=Je[T];return E||(T>=Je.length&&(Je.length=T+1),Je[T]=E=Be.get(T)),E}t.invokeEntryPoint=function(T,E){T=we(T)(E),qe()?re.nc(T):Kt(T)};var ot,ft,st=[],ae=0,ie=0;function se(T){this.Zb=T,this.Sb=T-24,this.xc=function(E){l()[this.Sb+4>>2>>>0]=E},this.bc=function(){return l()[this.Sb+4>>2>>>0]},this.wc=function(E){l()[this.Sb+8>>2>>>0]=E},this.Dc=function(){return l()[this.Sb+8>>2>>>0]},this.rc=function(){f()[this.Sb>>2>>>0]=0},this.hc=function(E){E=E?1:0,s()[this.Sb+12>>0>>>0]=E},this.uc=function(){return s()[this.Sb+12>>0>>>0]!=0},this.ic=function(E){E=E?1:0,s()[this.Sb+13>>0>>>0]=E},this.kc=function(){return s()[this.Sb+13>>0>>>0]!=0},this.fc=function(E,k){this.cc(0),this.xc(E),this.wc(k),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(f(),this.Sb>>2,1)},this.Hc=function(){return Atomics.sub(f(),this.Sb>>2,1)===1},this.cc=function(E){l()[this.Sb+16>>2>>>0]=E},this.tc=function(){return l()[this.Sb+16>>2>>>0]},this.vc=function(){if(Qt(this.bc()))return l()[this.Zb>>2>>>0];var E=this.tc();return E!==0?E:this.Zb}}function gt(T){return Vt(new se(T).Sb)}function at(T,E,k,C){return I?Q(3,1,T,E,k,C):mt(T,E,k,C)}function mt(T,E,k,C){if(typeof SharedArrayBuffer>"u")return j("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var z=[];return I&&z.length===0?at(T,E,k,C):(T={Ic:k,Ub:T,zc:C,Nc:z},I?(T.Oc="spawnThread",postMessage(T,z),0):dt(T))}function bt(T,E,k){return I?Q(4,1,T,E,k):0}function yt(T,E){if(I)return Q(5,1,T,E)}function _t(T,E){if(I)return Q(6,1,T,E)}function wt(T,E,k){if(I)return Q(7,1,T,E,k)}function vt(T,E,k){return I?Q(8,1,T,E,k):0}function xt(T,E){if(I)return Q(9,1,T,E)}function Tt(T,E,k){if(I)return Q(10,1,T,E,k)}function St(T,E,k,C){if(I)return Q(11,1,T,E,k,C)}function At(T,E,k,C){if(I)return Q(12,1,T,E,k,C)}function Ot(T,E,k,C){if(I)return Q(13,1,T,E,k,C)}function Et(T){if(I)return Q(14,1,T)}function P(T,E){if(I)return Q(15,1,T,E)}function M(T,E,k){if(I)return Q(16,1,T,E,k)}function $(T){Atomics.store(f(),T>>2,1),Dt()&&Yt(T),Atomics.compareExchange(f(),T>>2,1,0)}function R(T){return l()[T>>>2]+4294967296*f()[T+4>>>2]}function U(T,E,k,C,z,G){return I?Q(17,1,T,E,k,C,z,G):-52}function W(T,E,k,C,z,G){if(I)return Q(18,1,T,E,k,C,z,G)}function Y(T){var E=Ce(T)+1,k=Lt(E);return k&&Ne(T,s(),k,E),k}function te(T,E,k){function C(ge){return(ge=ge.toTimeString().match(/\(([A-Za-z ]+)\)$/))?ge[1]:"GMT"}if(I)return Q(19,1,T,E,k);var z=new Date().getFullYear(),G=new Date(z,0,1),K=new Date(z,6,1);z=G.getTimezoneOffset();var ne=K.getTimezoneOffset(),pe=Math.max(z,ne);f()[T>>2>>>0]=60*pe,f()[E>>2>>>0]=+(z!=ne),T=C(G),E=C(K),T=Y(T),E=Y(E),ne>2>>>0]=T,l()[k+4>>2>>>0]=E):(l()[k>>2>>>0]=E,l()[k+4>>2>>>0]=T)}function Q(T,E){var k=arguments.length-2,C=arguments;return It(()=>{for(var z=jt(8*k),G=z>>3,K=0;K>>0]=ne}return Xt(T,k,z,E)})}t.executeNotifiedProxyingQueue=$,ft=x?()=>{var T=process.hrtime();return 1e3*T[0]+T[1]/1e6}:I?()=>performance.now()-t.__performance_now_clock_drift:()=>performance.now();var le,Te=[],Le={};function $e(){if(!le){var 
T,E={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:(typeof navigator=="object"&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:w||"./this.program"};for(T in Le)Le[T]===void 0?delete E[T]:E[T]=Le[T];var k=[];for(T in E)k.push(T+"="+E[T]);le=k}return le}function V(T,E){if(I)return Q(20,1,T,E);var k=0;return $e().forEach(function(C,z){var G=E+k;for(z=l()[T+4*z>>2>>>0]=G,G=0;G>0>>>0]=C.charCodeAt(G);s()[z>>0>>>0]=0,k+=C.length+1}),0}function me(T,E){if(I)return Q(21,1,T,E);var k=$e();l()[T>>2>>>0]=k.length;var C=0;return k.forEach(function(z){C+=z.length+1}),l()[E>>2>>>0]=C,0}function Pe(T){return I?Q(22,1,T):52}function We(T,E,k,C){return I?Q(23,1,T,E,k,C):52}function et(T,E,k,C,z){return I?Q(24,1,T,E,k,C,z):70}var $t=[null,[],[]];function Ft(T,E){var k=$t[T];E===0||E===10?((T===1?D:j)(Fe(k,0)),k.length=0):k.push(E)}function zt(T,E,k,C){if(I)return Q(25,1,T,E,k,C);for(var z=0,G=0;G>2>>>0],ne=l()[E+4>>2>>>0];E+=8;for(var pe=0;pe>>0]);z+=ne}return l()[C>>2>>>0]=z,0}var ze=0;function Mt(T){return T%4==0&&(T%100!=0||T%400==0)}var Bt=[31,29,31,30,31,30,31,31,30,31,30,31],Ut=[31,28,31,30,31,30,31,31,30,31,30,31];function Gt(T,E,k,C){function z(q,ye,Me){for(q=typeof q=="number"?q.toString():q||"";q.lengthht?-1:0tt-q.getDate())){q.setDate(q.getDate()+ye);break}ye-=tt-q.getDate()+1,q.setDate(1),11>Me?q.setMonth(Me+1):(q.setMonth(0),q.setFullYear(q.getFullYear()+1))}return Me=new Date(q.getFullYear()+1,0,4),ye=ne(new Date(q.getFullYear(),0,4)),Me=ne(Me),0>=K(ye,q)?0>=K(Me,q)?q.getFullYear()+1:q.getFullYear():q.getFullYear()-1}var ge=f()[C+40>>2>>>0];for(var De in C={Lc:f()[C>>2>>>0],Kc:f()[C+4>>2>>>0],dc:f()[C+8>>2>>>0],jc:f()[C+12>>2>>>0],ec:f()[C+16>>2>>>0],Xb:f()[C+20>>2>>>0],Tb:f()[C+24>>2>>>0],Wb:f()[C+28>>2>>>0],Rc:f()[C+32>>2>>>0],Jc:f()[C+36>>2>>>0],Mc:ge?xe(ge):""},k=xe(k),ge={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})k=k.replace(new RegExp(De,"g"),ge[De]);var Ke="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),He="January February March April May June July August September October November December".split(" ");for(De in ge={"%a":function(q){return Ke[q.Tb].substring(0,3)},"%A":function(q){return Ke[q.Tb]},"%b":function(q){return He[q.ec].substring(0,3)},"%B":function(q){return He[q.ec]},"%C":function(q){return G((q.Xb+1900)/100|0,2)},"%d":function(q){return G(q.jc,2)},"%e":function(q){return z(q.jc,2," ")},"%g":function(q){return pe(q).toString().substring(2)},"%G":function(q){return pe(q)},"%H":function(q){return G(q.dc,2)},"%I":function(q){return(q=q.dc)==0?q=12:12q.dc?"AM":"PM"},"%S":function(q){return G(q.Lc,2)},"%t":function(){return" "},"%u":function(q){return q.Tb||7},"%U":function(q){return G(Math.floor((q.Wb+7-q.Tb)/7),2)},"%V":function(q){var ye=Math.floor((q.Wb+7-(q.Tb+6)%7)/7);if(2>=(q.Tb+371-q.Wb-2)%7&&ye++,ye)ye==53&&((Me=(q.Tb+371-q.Wb)%7)==4||Me==3&&Mt(q.Xb)||(ye=1));else{ye=52;var Me=(q.Tb+7-q.Wb-1)%7;(Me==4||Me==5&&Mt(q.Xb%400-1))&&ye++}return G(ye,2)},"%w":function(q){return q.Tb},"%W":function(q){return G(Math.floor((q.Wb+7-(q.Tb+6)%7)/7),2)},"%y":function(q){return(q.Xb+1900).toString().substring(2)},"%Y":function(q){return q.Xb+1900},"%z":function(q){var 
ye=0<=(q=q.Jc);return q=Math.abs(q)/60,(ye?"+":"-")+("0000"+(q/60*100+q%60)).slice(-4)},"%Z":function(q){return q.Mc},"%%":function(){return"%"}},k=k.replace(/%%/g,"\0\0"),ge)k.includes(De)&&(k=k.replace(new RegExp(De,"g"),ge[De](C)));return De=function(q){var ye=Array(Ce(q)+1);return Ne(q,ye,0,ye.length),ye}(k=k.replace(/\0\0/g,"%")),De.length>E?0:(function(q,ye){s().set(q,ye>>>0)}(De,T),De.length-1)}re.fc();var hn=[null,Re,kt,at,bt,yt,_t,wt,vt,xt,Tt,St,At,Ot,Et,P,M,U,W,te,V,me,Pe,We,et,zt],pn={b:function(T){return Lt(T+24)+24},n:function(T){return(T=new se(T)).uc()||(T.hc(!0),ae--),T.ic(!1),st.push(T),T.sc(),T.vc()},ma:function(T){throw j("Unexpected exception thrown, this is not properly supported - aborting"),be=!0,T},x:function(){he(0);var T=st.pop();if(T.Hc()&&!T.kc()){var E=T.Dc();E&&we(E)(T.Zb),gt(T.Zb)}ie=0},e:function(){var T=ie;if(!T)return ze=0;var E=new se(T);E.cc(T);var k=E.bc();if(!k)return ze=0,T;for(var C=Array.prototype.slice.call(arguments),z=0;z$(C));else if(I)postMessage({targetThread:T,cmd:"processProxyingQueue",queue:C});else{if(!(T=re.Vb[T]))return;T.postMessage({cmd:"processProxyingQueue",queue:C})}return 1},Ea:function(){return-1},Pa:function(T,E){T=new Date(1e3*R(T)),f()[E>>2>>>0]=T.getUTCSeconds(),f()[E+4>>2>>>0]=T.getUTCMinutes(),f()[E+8>>2>>>0]=T.getUTCHours(),f()[E+12>>2>>>0]=T.getUTCDate(),f()[E+16>>2>>>0]=T.getUTCMonth(),f()[E+20>>2>>>0]=T.getUTCFullYear()-1900,f()[E+24>>2>>>0]=T.getUTCDay(),T=(T.getTime()-Date.UTC(T.getUTCFullYear(),0,1,0,0,0,0))/864e5|0,f()[E+28>>2>>>0]=T},Qa:function(T,E){T=new Date(1e3*R(T)),f()[E>>2>>>0]=T.getSeconds(),f()[E+4>>2>>>0]=T.getMinutes(),f()[E+8>>2>>>0]=T.getHours(),f()[E+12>>2>>>0]=T.getDate(),f()[E+16>>2>>>0]=T.getMonth(),f()[E+20>>2>>>0]=T.getFullYear()-1900,f()[E+24>>2>>>0]=T.getDay();var k=new Date(T.getFullYear(),0,1),C=(T.getTime()-k.getTime())/864e5|0;f()[E+28>>2>>>0]=C,f()[E+36>>2>>>0]=-60*T.getTimezoneOffset(),C=new Date(T.getFullYear(),6,1).getTimezoneOffset(),T=0|(C!=(k=k.getTimezoneOffset())&&T.getTimezoneOffset()==Math.min(k,C)),f()[E+32>>2>>>0]=T},Ra:function(T){var E=new Date(f()[T+20>>2>>>0]+1900,f()[T+16>>2>>>0],f()[T+12>>2>>>0],f()[T+8>>2>>>0],f()[T+4>>2>>>0],f()[T>>2>>>0],0),k=f()[T+32>>2>>>0],C=E.getTimezoneOffset(),z=new Date(E.getFullYear(),0,1),G=new Date(E.getFullYear(),6,1).getTimezoneOffset(),K=z.getTimezoneOffset(),ne=Math.min(K,G);return 0>k?f()[T+32>>2>>>0]=+(G!=K&&ne==C):0>2>>>0]=E.getDay(),k=(E.getTime()-z.getTime())/864e5|0,f()[T+28>>2>>>0]=k,f()[T>>2>>>0]=E.getSeconds(),f()[T+4>>2>>>0]=E.getMinutes(),f()[T+8>>2>>>0]=E.getHours(),f()[T+12>>2>>>0]=E.getDate(),f()[T+16>>2>>>0]=E.getMonth(),E.getTime()/1e3|0},Aa:U,Ba:W,Sa:function T(E,k,C){T.Ac||(T.Ac=!0,te(E,k,C))},y:function(){fe("")},U:function(){if(!x&&!O){var T="Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread";ot||(ot={}),ot[T]||(ot[T]=1,x&&(T="warning: "+T),j(T))}},ra:function(){return 4294901760},B:ft,Ia:function(T,E,k){h().copyWithin(T>>>0,E>>>0,E+k>>>0)},F:function(){return x?a(3993).cpus().length:navigator.hardwareConcurrency},Da:function(T,E,k){Te.length=E,k>>=3;for(var C=0;C>>0];return(0>T?Pt[-T-1]:hn[T]).apply(null,Te)},qa:function(T){var E=h().length;if((T>>>=0)<=E||4294901760=k;k*=2){var C=E*(1+.2/k);C=Math.min(C,T+100663296);var z=Math;C=Math.max(T,C),z=z.min.call(z,4294901760,C+(65536-C%65536)%65536);e:{try{X.grow(z-ee.byteLength+65535>>>16),Ee(X.buffer);var G=1;break e}catch{}G=void 
0}if(G)return!0}return!1},Na:function(){throw"unwind"},Ga:V,Ha:me,J:it,I:Pe,S:We,ga:et,R:zt,d:function(){return ze},na:function T(E,k){T.lc||(T.lc=function(){if(typeof crypto=="object"&&typeof crypto.getRandomValues=="function"){var z=new Uint8Array(1);return()=>(crypto.getRandomValues(z),z[0])}if(x)try{var G=a(Object(function(){var K=new Error("Cannot find module 'crypto'");throw K.code="MODULE_NOT_FOUND",K}()));return()=>G.randomBytes(1)[0]}catch{}return()=>fe("randomDevice")}());for(var C=0;C>0>>>0]=T.lc();return 0},ia:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},ja:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},K:function(T){var E=de();try{return we(T)()}catch(k){if(ce(E),k!==k+0)throw k;he(1,0)}},f:function(T,E){var k=de();try{return we(T)(E)}catch(C){if(ce(k),C!==C+0)throw C;he(1,0)}},P:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},Q:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},k:function(T,E,k){var C=de();try{return we(T)(E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},p:function(T,E,k,C){var z=de();try{return we(T)(E,k,C)}catch(G){if(ce(z),G!==G+0)throw G;he(1,0)}},q:function(T,E,k,C,z){var G=de();try{return we(T)(E,k,C,z)}catch(K){if(ce(G),K!==K+0)throw K;he(1,0)}},N:function(T,E,k,C,z,G){var K=de();try{return we(T)(E,k,C,z,G)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},s:function(T,E,k,C,z,G){var K=de();try{return we(T)(E,k,C,z,G)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},w:function(T,E,k,C,z,G,K){var ne=de();try{return we(T)(E,k,C,z,G,K)}catch(pe){if(ce(ne),pe!==pe+0)throw pe;he(1,0)}},L:function(T,E,k,C,z,G,K,ne){var pe=de();try{return we(T)(E,k,C,z,G,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},E:function(T,E,k,C,z,G,K,ne,pe,ge,De,Ke){var He=de();try{return we(T)(E,k,C,z,G,K,ne,pe,ge,De,Ke)}catch(q){if(ce(He),q!==q+0)throw q;he(1,0)}},aa:function(T,E,k,C,z,G,K,ne){var pe=de();try{return un(T,E,k,C,z,G,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},_:function(T,E,k,C,z,G,K){var ne=de();try{return en(T,E,k,C,z,G,K)}catch(pe){if(ce(ne),pe!==pe+0)throw pe;he(1,0)}},Z:function(T,E,k,C,z){var G=de();try{return ln(T,E,k,C,z)}catch(K){if(ce(G),K!==K+0)throw K;he(1,0)}},ca:function(T,E,k,C){var z=de();try{return sn(T,E,k,C)}catch(G){if(ce(z),G!==G+0)throw G;he(1,0)}},$:function(T){var E=de();try{return Jt(T)}catch(k){if(ce(E),k!==k+0)throw k;he(1,0)}},ba:function(T,E){var k=de();try{return an(T,E)}catch(C){if(ce(k),C!==C+0)throw C;he(1,0)}},Y:function(T,E,k){var C=de();try{return tn(T,E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},g:function(T){var E=de();try{we(T)()}catch(k){if(ce(E),k!==k+0)throw k;he(1,0)}},r:function(T,E){var k=de();try{we(T)(E)}catch(C){if(ce(k),C!==C+0)throw C;he(1,0)}},i:function(T,E,k){var C=de();try{we(T)(E,k)}catch(z){if(ce(C),z!==z+0)throw z;he(1,0)}},ha:function(T,E,k,C){var z=de();try{we(T)(E,k,C)}catch(G){if(ce(z),G!==G+0)throw G;he(1,0)}},m:function(T,E,k,C){var z=de();try{we(T)(E,k,C)}catch(G){if(ce(z),G!==G+0)throw G;he(1,0)}},v:function(T,E,k,C,z){var G=de();try{we(T)(E,k,C,z)}catch(K){if(ce(G),K!==K+0)throw K;he(1,0)}},u:function(T,E,k,C,z,G){var K=de();try{we(T)(E,k,C,z,G)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},O:function(T,E,k,C,z,G,K){var ne=de();try{we(T)(E,k,C,z,G,K)}catch(pe){if(ce(ne),pe!==pe+0)throw pe;he(1,0)}},A:function(T,E,k,C,z,G,K,ne){var pe=de();try{we(T)(E,k,C,z,G,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw 
ge;he(1,0)}},ka:function(T,E,k,C,z,G,K,ne,pe){var ge=de();try{we(T)(E,k,C,z,G,K,ne,pe)}catch(De){if(ce(ge),De!==De+0)throw De;he(1,0)}},C:function(T,E,k,C,z,G,K,ne,pe,ge,De){var Ke=de();try{we(T)(E,k,C,z,G,K,ne,pe,ge,De)}catch(He){if(ce(Ke),He!==He+0)throw He;he(1,0)}},D:function(T,E,k,C,z,G,K,ne,pe,ge,De,Ke,He,q,ye,Me){var tt=de();try{we(T)(E,k,C,z,G,K,ne,pe,ge,De,Ke,He,q,ye,Me)}catch(ht){if(ce(tt),ht!==ht+0)throw ht;he(1,0)}},fa:function(T,E,k,C,z,G,K,ne){var pe=de();try{nn(T,E,k,C,z,G,K,ne)}catch(ge){if(ce(pe),ge!==ge+0)throw ge;he(1,0)}},da:function(T,E,k,C,z,G,K,ne,pe,ge,De,Ke){var He=de();try{on(T,E,k,C,z,G,K,ne,pe,ge,De,Ke)}catch(q){if(ce(He),q!==q+0)throw q;he(1,0)}},ea:function(T,E,k,C,z,G){var K=de();try{rn(T,E,k,C,z,G)}catch(ne){if(ce(K),ne!==ne+0)throw ne;he(1,0)}},o:function(T){return T},a:X||t.wasmMemory,G:function(T){ze=T},la:Gt,z:function(T,E,k,C){return Gt(T,E,k,C)}};(function(){function T(z,G){t.asm=z.exports,re.qc.push(t.asm.sb),Be=t.asm.ub,Ve.unshift(t.asm.Va),J=G,I||(je--,t.monitorRunDependencies&&t.monitorRunDependencies(je),je==0&&Ye&&(z=Ye,Ye=null,z()))}function E(z){T(z.instance,z.module)}function k(z){return function(){if(!H&&(A||O)){if(typeof fetch=="function"&&!Ie.startsWith("file://"))return fetch(Ie,{credentials:"same-origin"}).then(function(G){if(!G.ok)throw"failed to load wasm binary file at '"+Ie+"'";return G.arrayBuffer()}).catch(function(){return lt()});if(d)return new Promise(function(G,K){d(Ie,function(ne){G(new Uint8Array(ne))},K)})}return Promise.resolve().then(function(){return lt()})}().then(function(G){return WebAssembly.instantiate(G,C)}).then(function(G){return G}).then(z,function(G){j("failed to asynchronously prepare wasm: "+G),fe(G)})}var C={a:pn};if(I||(je++,t.monitorRunDependencies&&t.monitorRunDependencies(je)),t.instantiateWasm)try{return t.instantiateWasm(C,T)}catch(z){return j("Module.instantiateWasm callback failed with error: "+z),!1}(H||typeof WebAssembly.instantiateStreaming!="function"||pt()||Ie.startsWith("file://")||x||typeof fetch!="function"?k(E):fetch(Ie,{credentials:"same-origin"}).then(function(z){return WebAssembly.instantiateStreaming(z,C).then(E,function(G){return j("wasm streaming compile failed: "+G),j("falling back to ArrayBuffer 
instantiation"),k(E)})})).catch(r)})(),t.___wasm_call_ctors=function(){return(t.___wasm_call_ctors=t.asm.Va).apply(null,arguments)},t._OrtInit=function(){return(t._OrtInit=t.asm.Wa).apply(null,arguments)},t._OrtCreateSessionOptions=function(){return(t._OrtCreateSessionOptions=t.asm.Xa).apply(null,arguments)},t._OrtAppendExecutionProvider=function(){return(t._OrtAppendExecutionProvider=t.asm.Ya).apply(null,arguments)},t._OrtAddSessionConfigEntry=function(){return(t._OrtAddSessionConfigEntry=t.asm.Za).apply(null,arguments)},t._OrtReleaseSessionOptions=function(){return(t._OrtReleaseSessionOptions=t.asm._a).apply(null,arguments)},t._OrtCreateSession=function(){return(t._OrtCreateSession=t.asm.$a).apply(null,arguments)},t._OrtReleaseSession=function(){return(t._OrtReleaseSession=t.asm.ab).apply(null,arguments)},t._OrtGetInputCount=function(){return(t._OrtGetInputCount=t.asm.bb).apply(null,arguments)},t._OrtGetOutputCount=function(){return(t._OrtGetOutputCount=t.asm.cb).apply(null,arguments)},t._OrtGetInputName=function(){return(t._OrtGetInputName=t.asm.db).apply(null,arguments)},t._OrtGetOutputName=function(){return(t._OrtGetOutputName=t.asm.eb).apply(null,arguments)},t._OrtFree=function(){return(t._OrtFree=t.asm.fb).apply(null,arguments)},t._OrtCreateTensor=function(){return(t._OrtCreateTensor=t.asm.gb).apply(null,arguments)},t._OrtGetTensorData=function(){return(t._OrtGetTensorData=t.asm.hb).apply(null,arguments)},t._OrtReleaseTensor=function(){return(t._OrtReleaseTensor=t.asm.ib).apply(null,arguments)},t._OrtCreateRunOptions=function(){return(t._OrtCreateRunOptions=t.asm.jb).apply(null,arguments)},t._OrtAddRunConfigEntry=function(){return(t._OrtAddRunConfigEntry=t.asm.kb).apply(null,arguments)},t._OrtReleaseRunOptions=function(){return(t._OrtReleaseRunOptions=t.asm.lb).apply(null,arguments)},t._OrtRun=function(){return(t._OrtRun=t.asm.mb).apply(null,arguments)},t._OrtEndProfiling=function(){return(t._OrtEndProfiling=t.asm.nb).apply(null,arguments)};var Dt=t._pthread_self=function(){return(Dt=t._pthread_self=t.asm.ob).apply(null,arguments)},Lt=t._malloc=function(){return(Lt=t._malloc=t.asm.pb).apply(null,arguments)},Vt=t._free=function(){return(Vt=t._free=t.asm.qb).apply(null,arguments)},qt=t._fflush=function(){return(qt=t._fflush=t.asm.rb).apply(null,arguments)};t.__emscripten_tls_init=function(){return(t.__emscripten_tls_init=t.asm.sb).apply(null,arguments)};var Wt=t.___funcs_on_exit=function(){return(Wt=t.___funcs_on_exit=t.asm.tb).apply(null,arguments)},Ht=t.__emscripten_thread_init=function(){return(Ht=t.__emscripten_thread_init=t.asm.vb).apply(null,arguments)};t.__emscripten_thread_crashed=function(){return(t.__emscripten_thread_crashed=t.asm.wb).apply(null,arguments)};var 
Ct,Xt=t._emscripten_run_in_main_runtime_thread_js=function(){return(Xt=t._emscripten_run_in_main_runtime_thread_js=t.asm.xb).apply(null,arguments)},Yt=t.__emscripten_proxy_execute_task_queue=function(){return(Yt=t.__emscripten_proxy_execute_task_queue=t.asm.yb).apply(null,arguments)},Rt=t.__emscripten_thread_free_data=function(){return(Rt=t.__emscripten_thread_free_data=t.asm.zb).apply(null,arguments)},Kt=t.__emscripten_thread_exit=function(){return(Kt=t.__emscripten_thread_exit=t.asm.Ab).apply(null,arguments)},he=t._setThrew=function(){return(he=t._setThrew=t.asm.Bb).apply(null,arguments)},Zt=t._emscripten_stack_set_limits=function(){return(Zt=t._emscripten_stack_set_limits=t.asm.Cb).apply(null,arguments)},de=t.stackSave=function(){return(de=t.stackSave=t.asm.Db).apply(null,arguments)},ce=t.stackRestore=function(){return(ce=t.stackRestore=t.asm.Eb).apply(null,arguments)},jt=t.stackAlloc=function(){return(jt=t.stackAlloc=t.asm.Fb).apply(null,arguments)},Nt=t.___cxa_can_catch=function(){return(Nt=t.___cxa_can_catch=t.asm.Gb).apply(null,arguments)},Qt=t.___cxa_is_pointer_type=function(){return(Qt=t.___cxa_is_pointer_type=t.asm.Hb).apply(null,arguments)},Jt=t.dynCall_j=function(){return(Jt=t.dynCall_j=t.asm.Ib).apply(null,arguments)},en=t.dynCall_iiiiij=function(){return(en=t.dynCall_iiiiij=t.asm.Jb).apply(null,arguments)},tn=t.dynCall_jii=function(){return(tn=t.dynCall_jii=t.asm.Kb).apply(null,arguments)},nn=t.dynCall_viiiiij=function(){return(nn=t.dynCall_viiiiij=t.asm.Lb).apply(null,arguments)},rn=t.dynCall_vjji=function(){return(rn=t.dynCall_vjji=t.asm.Mb).apply(null,arguments)},on=t.dynCall_viiijjjii=function(){return(on=t.dynCall_viiijjjii=t.asm.Nb).apply(null,arguments)},sn=t.dynCall_iij=function(){return(sn=t.dynCall_iij=t.asm.Ob).apply(null,arguments)},an=t.dynCall_ji=function(){return(an=t.dynCall_ji=t.asm.Pb).apply(null,arguments)},un=t.dynCall_iiiiiij=function(){return(un=t.dynCall_iiiiiij=t.asm.Qb).apply(null,arguments)},ln=t.dynCall_iiij=function(){return(ln=t.dynCall_iiij=t.asm.Rb).apply(null,arguments)};function cn(){function T(){if(!Ct&&(Ct=!0,t.calledRun=!0,!be)&&(I||rt(Ve),e(t),t.onRuntimeInitialized&&t.onRuntimeInitialized(),!I)){if(t.postRun)for(typeof t.postRun=="function"&&(t.postRun=[t.postRun]);t.postRun.length;){var E=t.postRun.shift();Ze.unshift(E)}rt(Ze)}}if(!(0{var u,c=(u=(u=typeof document<"u"&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(p){var s,h,f;p=p||{},s||(s=p!==void 0?p:{}),s.ready=new Promise(function(P,M){h=P,f=M});var l,o,t,e,r,i,d=Object.assign({},s),g="./this.program",m=(P,M)=>{throw M},b=typeof window=="object",_=typeof importScripts=="function",v=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string",w="";v?(w=_?a(908).dirname(w)+"/":"//",i=()=>{r||(e=a(1384),r=a(908))},l=function(P,M){return i(),P=r.normalize(P),e.readFileSync(P,M?void 0:"utf8")},t=P=>((P=l(P,!0)).buffer||(P=new Uint8Array(P)),P),o=(P,M,$)=>{i(),P=r.normalize(P),e.readFile(P,function(R,U){R?$(R):M(U.buffer)})},1{if(x||0{var M=new XMLHttpRequest;return M.open("GET",P,!1),M.send(null),M.responseText},_&&(t=P=>{var M=new XMLHttpRequest;return M.open("GET",P,!1),M.responseType="arraybuffer",M.send(null),new Uint8Array(M.response)}),o=(P,M,$)=>{var R=new XMLHttpRequest;R.open("GET",P,!0),R.responseType="arraybuffer",R.onload=()=>{R.status==200||R.status==0&&R.response?M(R.response):$()},R.onerror=$,R.send(null)});var 
S,A=s.print||console.log.bind(console),O=s.printErr||console.warn.bind(console);Object.assign(s,d),d=null,s.thisProgram&&(g=s.thisProgram),s.quit&&(m=s.quit),s.wasmBinary&&(S=s.wasmBinary);var x=s.noExitRuntime||!1;typeof WebAssembly!="object"&&Ee("no native wasm support detected");var I,N,B,L,F,H,D=!1,j=typeof TextDecoder<"u"?new TextDecoder("utf8"):void 0;function Z(P,M,$){var R=(M>>>=0)+$;for($=M;P[$]&&!($>=R);)++$;if(16<$-M&&P.buffer&&j)return j.decode(P.subarray(M,$));for(R="";M<$;){var U=P[M++];if(128&U){var W=63&P[M++];if((224&U)==192)R+=String.fromCharCode((31&U)<<6|W);else{var Y=63&P[M++];65536>(U=(240&U)==224?(15&U)<<12|W<<6|Y:(7&U)<<18|W<<12|Y<<6|63&P[M++])?R+=String.fromCharCode(U):(U-=65536,R+=String.fromCharCode(55296|U>>10,56320|1023&U))}}else R+=String.fromCharCode(U)}return R}function X(P,M){return(P>>>=0)?Z(L,P,M):""}function J(P,M,$,R){if(!(0>>=0;R=$+R-1;for(var W=0;W=Y&&(Y=65536+((1023&Y)<<10)|1023&P.charCodeAt(++W)),127>=Y){if($>=R)break;M[$++>>>0]=Y}else{if(2047>=Y){if($+1>=R)break;M[$++>>>0]=192|Y>>6}else{if(65535>=Y){if($+2>=R)break;M[$++>>>0]=224|Y>>12}else{if($+3>=R)break;M[$++>>>0]=240|Y>>18,M[$++>>>0]=128|Y>>12&63}M[$++>>>0]=128|Y>>6&63}M[$++>>>0]=128|63&Y}}return M[$>>>0]=0,$-U}function ee(P){for(var M=0,$=0;$=R?M++:2047>=R?M+=2:55296<=R&&57343>=R?(M+=4,++$):M+=3}return M}function ue(){var P=I.buffer;N=P,s.HEAP8=B=new Int8Array(P),s.HEAP16=new Int16Array(P),s.HEAP32=F=new Int32Array(P),s.HEAPU8=L=new Uint8Array(P),s.HEAPU16=new Uint16Array(P),s.HEAPU32=H=new Uint32Array(P),s.HEAPF32=new Float32Array(P),s.HEAPF64=new Float64Array(P)}var Ae,ve=[],oe=[],_e=[],be=[],ke=0;function Fe(){var P=s.preRun.shift();ve.unshift(P)}var xe,Ne=0,Ce=null;function Ee(P){throw s.onAbort&&s.onAbort(P),O(P="Aborted("+P+")"),D=!0,P=new WebAssembly.RuntimeError(P+". 
Build with -sASSERTIONS for more info."),f(P),P}function Oe(){return xe.startsWith("data:application/octet-stream;base64,")}if(xe="ort-wasm.wasm",!Oe()){var Be=xe;xe=s.locateFile?s.locateFile(Be,w):w+Be}function Ge(){var P=xe;try{if(P==xe&&S)return new Uint8Array(S);if(t)return t(P);throw"both async and sync fetching of the wasm failed"}catch(M){Ee(M)}}function Ve(P){this.name="ExitStatus",this.message="Program terminated with exit("+P+")",this.status=P}function Xe(P){for(;0>2>>>0]=M},this.Eb=function(){return H[this.zb+4>>2>>>0]},this.Sb=function(M){H[this.zb+8>>2>>>0]=M},this.Wb=function(){return H[this.zb+8>>2>>>0]},this.Tb=function(){F[this.zb>>2>>>0]=0},this.Ib=function(M){B[this.zb+12>>0>>>0]=M?1:0},this.Pb=function(){return B[this.zb+12>>0>>>0]!=0},this.Jb=function(M){B[this.zb+13>>0>>>0]=M?1:0},this.Lb=function(){return B[this.zb+13>>0>>>0]!=0},this.Rb=function(M,$){this.Fb(0),this.Ub(M),this.Sb($),this.Tb(),this.Ib(!1),this.Jb(!1)},this.Nb=function(){F[this.zb>>2>>>0]+=1},this.Xb=function(){var M=F[this.zb>>2>>>0];return F[this.zb>>2>>>0]=M-1,M===1},this.Fb=function(M){H[this.zb+16>>2>>>0]=M},this.Ob=function(){return H[this.zb+16>>2>>>0]},this.Qb=function(){if(mt(this.Eb()))return H[this.Db>>2>>>0];var M=this.Ob();return M!==0?M:this.Db}}function je(P){return ot(new Ie(P).zb)}var Ye=[];function fe(P){var M=Ye[P];return M||(P>=Ye.length&&(Ye.length=P+1),Ye[P]=M=Ae.get(P)),M}function pt(P){var M=ee(P)+1,$=we(M);return $&&J(P,B,$,M),$}var lt={};function Pt(){if(!Qe){var P,M={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:(typeof navigator=="object"&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:g||"./this.program"};for(P in lt)lt[P]===void 0?delete M[P]:M[P]=lt[P];var $=[];for(P in M)$.push(P+"="+M[P]);Qe=$}return Qe}var Qe,ct=[null,[],[]];function dt(P,M){var $=ct[P];M===0||M===10?((P===1?A:O)(Z($,0)),$.length=0):$.push(M)}var Re=0;function it(P){return P%4==0&&(P%100!=0||P%400==0)}var re=[31,29,31,30,31,30,31,31,30,31,30,31],rt=[31,28,31,30,31,30,31,31,30,31,30,31];function It(P,M,$,R){function U(V,me,Pe){for(V=typeof V=="number"?V.toString():V||"";V.lengthet?-1:0We-V.getDate())){V.setDate(V.getDate()+me);break}me-=We-V.getDate()+1,V.setDate(1),11>Pe?V.setMonth(Pe+1):(V.setMonth(0),V.setFullYear(V.getFullYear()+1))}return Pe=new Date(V.getFullYear()+1,0,4),me=te(new Date(V.getFullYear(),0,4)),Pe=te(Pe),0>=Y(me,V)?0>=Y(Pe,V)?V.getFullYear()+1:V.getFullYear():V.getFullYear()-1}var le=F[R+40>>2>>>0];for(var Te in R={$b:F[R>>2>>>0],Zb:F[R+4>>2>>>0],Gb:F[R+8>>2>>>0],Kb:F[R+12>>2>>>0],Hb:F[R+16>>2>>>0],Cb:F[R+20>>2>>>0],Ab:F[R+24>>2>>>0],Bb:F[R+28>>2>>>0],bc:F[R+32>>2>>>0],Yb:F[R+36>>2>>>0],ac:le?X(le):""},$=X($),le={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})$=$.replace(new RegExp(Te,"g"),le[Te]);var Le="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),$e="January February March April May June July August September October November December".split(" ");for(Te in le={"%a":function(V){return Le[V.Ab].substring(0,3)},"%A":function(V){return Le[V.Ab]},"%b":function(V){return $e[V.Hb].substring(0,3)},"%B":function(V){return $e[V.Hb]},"%C":function(V){return 
W((V.Cb+1900)/100|0,2)},"%d":function(V){return W(V.Kb,2)},"%e":function(V){return U(V.Kb,2," ")},"%g":function(V){return Q(V).toString().substring(2)},"%G":function(V){return Q(V)},"%H":function(V){return W(V.Gb,2)},"%I":function(V){return(V=V.Gb)==0?V=12:12V.Gb?"AM":"PM"},"%S":function(V){return W(V.$b,2)},"%t":function(){return" "},"%u":function(V){return V.Ab||7},"%U":function(V){return W(Math.floor((V.Bb+7-V.Ab)/7),2)},"%V":function(V){var me=Math.floor((V.Bb+7-(V.Ab+6)%7)/7);if(2>=(V.Ab+371-V.Bb-2)%7&&me++,me)me==53&&((Pe=(V.Ab+371-V.Bb)%7)==4||Pe==3&&it(V.Cb)||(me=1));else{me=52;var Pe=(V.Ab+7-V.Bb-1)%7;(Pe==4||Pe==5&&it(V.Cb%400-1))&&me++}return W(me,2)},"%w":function(V){return V.Ab},"%W":function(V){return W(Math.floor((V.Bb+7-(V.Ab+6)%7)/7),2)},"%y":function(V){return(V.Cb+1900).toString().substring(2)},"%Y":function(V){return V.Cb+1900},"%z":function(V){var me=0<=(V=V.Yb);return V=Math.abs(V)/60,(me?"+":"-")+("0000"+(V/60*100+V%60)).slice(-4)},"%Z":function(V){return V.ac},"%%":function(){return"%"}},$=$.replace(/%%/g,"\0\0"),le)$.includes(Te)&&($=$.replace(new RegExp(Te,"g"),le[Te](R)));return Te=function(V){var me=Array(ee(V)+1);return J(V,me,0,me.length),me}($=$.replace(/\0\0/g,"%")),Te.length>M?0:(B.set(Te,P>>>0),Te.length-1)}var kt={a:function(P){return we(P+24)+24},m:function(P){return(P=new Ie(P)).Pb()||(P.Ib(!0),qe--),P.Jb(!1),Ze.push(P),P.Nb(),P.Qb()},ia:function(P){throw O("Unexpected exception thrown, this is not properly supported - aborting"),D=!0,P},w:function(){ae(0);var P=Ze.pop();if(P.Xb()&&!P.Lb()){var M=P.Wb();M&&fe(M)(P.Db),je(P.Db)}Ue=0},d:function(){var P=Ue;if(!P)return Re=0;var M=new Ie(P);M.Fb(P);var $=M.Eb();if(!$)return Re=0,P;for(var R=Array.prototype.slice.call(arguments),U=0;U>>2]+4294967296*F[P+4>>>2])),F[M>>2>>>0]=P.getUTCSeconds(),F[M+4>>2>>>0]=P.getUTCMinutes(),F[M+8>>2>>>0]=P.getUTCHours(),F[M+12>>2>>>0]=P.getUTCDate(),F[M+16>>2>>>0]=P.getUTCMonth(),F[M+20>>2>>>0]=P.getUTCFullYear()-1900,F[M+24>>2>>>0]=P.getUTCDay(),F[M+28>>2>>>0]=(P.getTime()-Date.UTC(P.getUTCFullYear(),0,1,0,0,0,0))/864e5|0},Ea:function(P,M){P=new Date(1e3*(H[P>>>2]+4294967296*F[P+4>>>2])),F[M>>2>>>0]=P.getSeconds(),F[M+4>>2>>>0]=P.getMinutes(),F[M+8>>2>>>0]=P.getHours(),F[M+12>>2>>>0]=P.getDate(),F[M+16>>2>>>0]=P.getMonth(),F[M+20>>2>>>0]=P.getFullYear()-1900,F[M+24>>2>>>0]=P.getDay();var $=new Date(P.getFullYear(),0,1);F[M+28>>2>>>0]=(P.getTime()-$.getTime())/864e5|0,F[M+36>>2>>>0]=-60*P.getTimezoneOffset();var R=new Date(P.getFullYear(),6,1).getTimezoneOffset();$=$.getTimezoneOffset(),F[M+32>>2>>>0]=0|(R!=$&&P.getTimezoneOffset()==Math.min($,R))},Fa:function(P){var M=new Date(F[P+20>>2>>>0]+1900,F[P+16>>2>>>0],F[P+12>>2>>>0],F[P+8>>2>>>0],F[P+4>>2>>>0],F[P>>2>>>0],0),$=F[P+32>>2>>>0],R=M.getTimezoneOffset(),U=new Date(M.getFullYear(),0,1),W=new Date(M.getFullYear(),6,1).getTimezoneOffset(),Y=U.getTimezoneOffset(),te=Math.min(Y,W);return 0>$?F[P+32>>2>>>0]=+(W!=Y&&te==R):0<$!=(te==R)&&(W=Math.max(Y,W),M.setTime(M.getTime()+6e4*((0<$?te:W)-R))),F[P+24>>2>>>0]=M.getDay(),F[P+28>>2>>>0]=(M.getTime()-U.getTime())/864e5|0,F[P>>2>>>0]=M.getSeconds(),F[P+4>>2>>>0]=M.getMinutes(),F[P+8>>2>>>0]=M.getHours(),F[P+12>>2>>>0]=M.getDate(),F[P+16>>2>>>0]=M.getMonth(),M.getTime()/1e3|0},sa:function(){return-52},ta:function(){},Ga:function P(M,$,R){P.Vb||(P.Vb=!0,function(U,W,Y){function te($e){return($e=$e.toTimeString().match(/\(([A-Za-z ]+)\)$/))?$e[1]:"GMT"}var Q=new Date().getFullYear(),le=new Date(Q,0,1),Te=new Date(Q,6,1);Q=le.getTimezoneOffset();var 
Le=Te.getTimezoneOffset();F[U>>2>>>0]=60*Math.max(Q,Le),F[W>>2>>>0]=+(Q!=Le),U=te(le),W=te(Te),U=pt(U),W=pt(W),Le>2>>>0]=U,H[Y+4>>2>>>0]=W):(H[Y>>2>>>0]=W,H[Y+4>>2>>>0]=U)}(M,$,R))},B:function(){Ee("")},ma:function(){return 4294901760},I:v?()=>{var P=process.hrtime();return 1e3*P[0]+P[1]/1e6}:()=>performance.now(),xa:function(P,M,$){L.copyWithin(P>>>0,M>>>0,M+$>>>0)},G:function(P){var M=L.length;if(4294901760<(P>>>=0))return!1;for(var $=1;4>=$;$*=2){var R=M*(1+.2/$);R=Math.min(R,P+100663296);var U=Math;R=Math.max(P,R),U=U.min.call(U,4294901760,R+(65536-R%65536)%65536);e:{try{I.grow(U-N.byteLength+65535>>>16),ue();var W=1;break e}catch{}W=void 0}if(W)return!0}return!1},va:function(P,M){var $=0;return Pt().forEach(function(R,U){var W=M+$;for(U=H[P+4*U>>2>>>0]=W,W=0;W>0>>>0]=R.charCodeAt(W);B[U>>0>>>0]=0,$+=R.length+1}),0},wa:function(P,M){var $=Pt();H[P>>2>>>0]=$.length;var R=0;return $.forEach(function(U){R+=U.length+1}),H[M>>2>>>0]=R,0},ba:function(P){x||0>2>>>0],te=H[M+4>>2>>>0];M+=8;for(var Q=0;Q>>0]);U+=te}return H[R>>2>>>0]=U,0},c:function(){return Re},ja:function P(M,$){P.Mb||(P.Mb=function(){if(typeof crypto=="object"&&typeof crypto.getRandomValues=="function"){var U=new Uint8Array(1);return()=>(crypto.getRandomValues(U),U[0])}if(v)try{var W=a(Object(function(){var Y=new Error("Cannot find module 'crypto'");throw Y.code="MODULE_NOT_FOUND",Y}()));return()=>W.randomBytes(1)[0]}catch{}return()=>Ee("randomDevice")}());for(var R=0;R<$;R++)B[M+R>>0>>>0]=P.Mb();return 0},ea:function(P,M,$){var R=ie();try{return fe(P)(M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},fa:function(P,M,$){var R=ie();try{return fe(P)(M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},J:function(P){var M=ie();try{return fe(P)()}catch($){if(se(M),$!==$+0)throw $;ae(1,0)}},e:function(P,M){var $=ie();try{return fe(P)(M)}catch(R){if(se($),R!==R+0)throw R;ae(1,0)}},N:function(P,M,$){var R=ie();try{return fe(P)(M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},O:function(P,M,$){var R=ie();try{return fe(P)(M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},j:function(P,M,$){var R=ie();try{return fe(P)(M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},o:function(P,M,$,R){var U=ie();try{return fe(P)(M,$,R)}catch(W){if(se(U),W!==W+0)throw W;ae(1,0)}},p:function(P,M,$,R,U){var W=ie();try{return fe(P)(M,$,R,U)}catch(Y){if(se(W),Y!==Y+0)throw Y;ae(1,0)}},M:function(P,M,$,R,U,W){var Y=ie();try{return fe(P)(M,$,R,U,W)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},r:function(P,M,$,R,U,W){var Y=ie();try{return fe(P)(M,$,R,U,W)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},v:function(P,M,$,R,U,W,Y){var te=ie();try{return fe(P)(M,$,R,U,W,Y)}catch(Q){if(se(te),Q!==Q+0)throw Q;ae(1,0)}},K:function(P,M,$,R,U,W,Y,te){var Q=ie();try{return fe(P)(M,$,R,U,W,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},D:function(P,M,$,R,U,W,Y,te,Q,le,Te,Le){var $e=ie();try{return fe(P)(M,$,R,U,W,Y,te,Q,le,Te,Le)}catch(V){if(se($e),V!==V+0)throw V;ae(1,0)}},X:function(P,M,$,R,U,W,Y,te){var Q=ie();try{return At(P,M,$,R,U,W,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},V:function(P,M,$,R,U,W,Y){var te=ie();try{return yt(P,M,$,R,U,W,Y)}catch(Q){if(se(te),Q!==Q+0)throw Q;ae(1,0)}},U:function(P,M,$,R,U){var W=ie();try{return Ot(P,M,$,R,U)}catch(Y){if(se(W),Y!==Y+0)throw Y;ae(1,0)}},Z:function(P,M,$,R){var U=ie();try{return Tt(P,M,$,R)}catch(W){if(se(U),W!==W+0)throw W;ae(1,0)}},W:function(P){var M=ie();try{return bt(P)}catch($){if(se(M),$!==$+0)throw $;ae(1,0)}},Y:function(P,M){var $=ie();try{return St(P,M)}catch(R){if(se($),R!==R+0)throw 
R;ae(1,0)}},T:function(P,M,$){var R=ie();try{return _t(P,M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},f:function(P){var M=ie();try{fe(P)()}catch($){if(se(M),$!==$+0)throw $;ae(1,0)}},q:function(P,M){var $=ie();try{fe(P)(M)}catch(R){if(se($),R!==R+0)throw R;ae(1,0)}},h:function(P,M,$){var R=ie();try{fe(P)(M,$)}catch(U){if(se(R),U!==U+0)throw U;ae(1,0)}},da:function(P,M,$,R){var U=ie();try{fe(P)(M,$,R)}catch(W){if(se(U),W!==W+0)throw W;ae(1,0)}},l:function(P,M,$,R){var U=ie();try{fe(P)(M,$,R)}catch(W){if(se(U),W!==W+0)throw W;ae(1,0)}},t:function(P,M,$,R,U){var W=ie();try{fe(P)(M,$,R,U)}catch(Y){if(se(W),Y!==Y+0)throw Y;ae(1,0)}},u:function(P,M,$,R,U,W){var Y=ie();try{fe(P)(M,$,R,U,W)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},x:function(P,M,$,R,U,W,Y){var te=ie();try{fe(P)(M,$,R,U,W,Y)}catch(Q){if(se(te),Q!==Q+0)throw Q;ae(1,0)}},z:function(P,M,$,R,U,W,Y,te){var Q=ie();try{fe(P)(M,$,R,U,W,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},ga:function(P,M,$,R,U,W,Y,te,Q){var le=ie();try{fe(P)(M,$,R,U,W,Y,te,Q)}catch(Te){if(se(le),Te!==Te+0)throw Te;ae(1,0)}},A:function(P,M,$,R,U,W,Y,te,Q,le,Te){var Le=ie();try{fe(P)(M,$,R,U,W,Y,te,Q,le,Te)}catch($e){if(se(Le),$e!==$e+0)throw $e;ae(1,0)}},C:function(P,M,$,R,U,W,Y,te,Q,le,Te,Le,$e,V,me,Pe){var We=ie();try{fe(P)(M,$,R,U,W,Y,te,Q,le,Te,Le,$e,V,me,Pe)}catch(et){if(se(We),et!==et+0)throw et;ae(1,0)}},aa:function(P,M,$,R,U,W,Y,te){var Q=ie();try{wt(P,M,$,R,U,W,Y,te)}catch(le){if(se(Q),le!==le+0)throw le;ae(1,0)}},_:function(P,M,$,R,U,W,Y,te,Q,le,Te,Le){var $e=ie();try{xt(P,M,$,R,U,W,Y,te,Q,le,Te,Le)}catch(V){if(se($e),V!==V+0)throw V;ae(1,0)}},$:function(P,M,$,R,U,W){var Y=ie();try{vt(P,M,$,R,U,W)}catch(te){if(se(Y),te!==te+0)throw te;ae(1,0)}},n:function(P){return P},F:function(P){Re=P},ha:It,y:function(P,M,$,R){return It(P,M,$,R)}};(function(){function P(U){s.asm=U.exports,I=s.asm.Ka,ue(),Ae=s.asm.ib,oe.unshift(s.asm.La),Ne--,s.monitorRunDependencies&&s.monitorRunDependencies(Ne),Ne==0&&Ce&&(U=Ce,Ce=null,U())}function M(U){P(U.instance)}function $(U){return function(){if(!S&&(b||_)){if(typeof fetch=="function"&&!xe.startsWith("file://"))return fetch(xe,{credentials:"same-origin"}).then(function(W){if(!W.ok)throw"failed to load wasm binary file at '"+xe+"'";return W.arrayBuffer()}).catch(function(){return Ge()});if(o)return new Promise(function(W,Y){o(xe,function(te){W(new Uint8Array(te))},Y)})}return Promise.resolve().then(function(){return Ge()})}().then(function(W){return WebAssembly.instantiate(W,R)}).then(function(W){return W}).then(U,function(W){O("failed to asynchronously prepare wasm: "+W),Ee(W)})}var R={a:kt};if(Ne++,s.monitorRunDependencies&&s.monitorRunDependencies(Ne),s.instantiateWasm)try{return s.instantiateWasm(R,P)}catch(U){return O("Module.instantiateWasm callback failed with error: "+U),!1}(S||typeof WebAssembly.instantiateStreaming!="function"||Oe()||xe.startsWith("file://")||v||typeof fetch!="function"?$(M):fetch(xe,{credentials:"same-origin"}).then(function(U){return WebAssembly.instantiateStreaming(U,R).then(M,function(W){return O("wasm streaming compile failed: "+W),O("falling back to ArrayBuffer 
instantiation"),$(M)})})).catch(f)})(),s.___wasm_call_ctors=function(){return(s.___wasm_call_ctors=s.asm.La).apply(null,arguments)},s._OrtInit=function(){return(s._OrtInit=s.asm.Ma).apply(null,arguments)},s._OrtCreateSessionOptions=function(){return(s._OrtCreateSessionOptions=s.asm.Na).apply(null,arguments)},s._OrtAppendExecutionProvider=function(){return(s._OrtAppendExecutionProvider=s.asm.Oa).apply(null,arguments)},s._OrtAddSessionConfigEntry=function(){return(s._OrtAddSessionConfigEntry=s.asm.Pa).apply(null,arguments)},s._OrtReleaseSessionOptions=function(){return(s._OrtReleaseSessionOptions=s.asm.Qa).apply(null,arguments)},s._OrtCreateSession=function(){return(s._OrtCreateSession=s.asm.Ra).apply(null,arguments)},s._OrtReleaseSession=function(){return(s._OrtReleaseSession=s.asm.Sa).apply(null,arguments)},s._OrtGetInputCount=function(){return(s._OrtGetInputCount=s.asm.Ta).apply(null,arguments)},s._OrtGetOutputCount=function(){return(s._OrtGetOutputCount=s.asm.Ua).apply(null,arguments)},s._OrtGetInputName=function(){return(s._OrtGetInputName=s.asm.Va).apply(null,arguments)},s._OrtGetOutputName=function(){return(s._OrtGetOutputName=s.asm.Wa).apply(null,arguments)},s._OrtFree=function(){return(s._OrtFree=s.asm.Xa).apply(null,arguments)},s._OrtCreateTensor=function(){return(s._OrtCreateTensor=s.asm.Ya).apply(null,arguments)},s._OrtGetTensorData=function(){return(s._OrtGetTensorData=s.asm.Za).apply(null,arguments)},s._OrtReleaseTensor=function(){return(s._OrtReleaseTensor=s.asm._a).apply(null,arguments)},s._OrtCreateRunOptions=function(){return(s._OrtCreateRunOptions=s.asm.$a).apply(null,arguments)},s._OrtAddRunConfigEntry=function(){return(s._OrtAddRunConfigEntry=s.asm.ab).apply(null,arguments)},s._OrtReleaseRunOptions=function(){return(s._OrtReleaseRunOptions=s.asm.bb).apply(null,arguments)},s._OrtRun=function(){return(s._OrtRun=s.asm.cb).apply(null,arguments)},s._OrtEndProfiling=function(){return(s._OrtEndProfiling=s.asm.db).apply(null,arguments)};var 
Je,we=s._malloc=function(){return(we=s._malloc=s.asm.eb).apply(null,arguments)},ot=s._free=function(){return(ot=s._free=s.asm.fb).apply(null,arguments)},ft=s._fflush=function(){return(ft=s._fflush=s.asm.gb).apply(null,arguments)},st=s.___funcs_on_exit=function(){return(st=s.___funcs_on_exit=s.asm.hb).apply(null,arguments)},ae=s._setThrew=function(){return(ae=s._setThrew=s.asm.jb).apply(null,arguments)},ie=s.stackSave=function(){return(ie=s.stackSave=s.asm.kb).apply(null,arguments)},se=s.stackRestore=function(){return(se=s.stackRestore=s.asm.lb).apply(null,arguments)},gt=s.stackAlloc=function(){return(gt=s.stackAlloc=s.asm.mb).apply(null,arguments)},at=s.___cxa_can_catch=function(){return(at=s.___cxa_can_catch=s.asm.nb).apply(null,arguments)},mt=s.___cxa_is_pointer_type=function(){return(mt=s.___cxa_is_pointer_type=s.asm.ob).apply(null,arguments)},bt=s.dynCall_j=function(){return(bt=s.dynCall_j=s.asm.pb).apply(null,arguments)},yt=s.dynCall_iiiiij=function(){return(yt=s.dynCall_iiiiij=s.asm.qb).apply(null,arguments)},_t=s.dynCall_jii=function(){return(_t=s.dynCall_jii=s.asm.rb).apply(null,arguments)},wt=s.dynCall_viiiiij=function(){return(wt=s.dynCall_viiiiij=s.asm.sb).apply(null,arguments)},vt=s.dynCall_vjji=function(){return(vt=s.dynCall_vjji=s.asm.tb).apply(null,arguments)},xt=s.dynCall_viiijjjii=function(){return(xt=s.dynCall_viiijjjii=s.asm.ub).apply(null,arguments)},Tt=s.dynCall_iij=function(){return(Tt=s.dynCall_iij=s.asm.vb).apply(null,arguments)},St=s.dynCall_ji=function(){return(St=s.dynCall_ji=s.asm.wb).apply(null,arguments)},At=s.dynCall_iiiiiij=function(){return(At=s.dynCall_iiiiiij=s.asm.xb).apply(null,arguments)},Ot=s.dynCall_iiij=function(){return(Ot=s.dynCall_iiij=s.asm.yb).apply(null,arguments)};function Et(){function P(){if(!Je&&(Je=!0,s.calledRun=!0,!D)){if(Xe(oe),h(s),s.onRuntimeInitialized&&s.onRuntimeInitialized(),s.postRun)for(typeof s.postRun=="function"&&(s.postRun=[s.postRun]);s.postRun.length;){var M=s.postRun.shift();be.unshift(M)}Xe(be)}}if(!(0{y.exports=function(n,a){for(var u=new Array(arguments.length-1),c=0,p=2,s=!0;p{var a=n;a.length=function(h){var f=h.length;if(!f)return 0;for(var l=0;--f%4>1&&h.charAt(f)==="=";)++l;return Math.ceil(3*h.length)/4-l};for(var u=new Array(64),c=new Array(123),p=0;p<64;)c[u[p]=p<26?p+65:p<52?p+71:p<62?p-4:p-59|43]=p++;a.encode=function(h,f,l){for(var o,t=null,e=[],r=0,i=0;f>2],o=(3&d)<<4,i=1;break;case 1:e[r++]=u[o|d>>4],o=(15&d)<<2,i=2;break;case 2:e[r++]=u[o|d>>6],e[r++]=u[63&d],i=0}r>8191&&((t||(t=[])).push(String.fromCharCode.apply(String,e)),r=0)}return i&&(e[r++]=u[o],e[r++]=61,i===1&&(e[r++]=61)),t?(r&&t.push(String.fromCharCode.apply(String,e.slice(0,r))),t.join("")):String.fromCharCode.apply(String,e.slice(0,r))};var s="invalid encoding";a.decode=function(h,f,l){for(var o,t=l,e=0,r=0;r1)break;if((i=c[i])===void 0)throw Error(s);switch(e){case 0:o=i,e=1;break;case 1:f[l++]=o<<2|(48&i)>>4,o=i,e=2;break;case 2:f[l++]=(15&o)<<4|(60&i)>>2,o=i,e=3;break;case 3:f[l++]=(3&o)<<6|i,e=0}}if(e===1)throw Error(s);return l-t},a.test=function(h){return/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/.test(h)}},9211:y=>{function n(){this._listeners={}}y.exports=n,n.prototype.on=function(a,u,c){return(this._listeners[a]||(this._listeners[a]=[])).push({fn:u,ctx:c||this}),this},n.prototype.off=function(a,u){if(a===void 0)this._listeners={};else if(u===void 0)this._listeners[a]=[];else for(var c=this._listeners[a],p=0;p{function n(s){return typeof Float32Array<"u"?function(){var h=new Float32Array([-0]),f=new 
Uint8Array(h.buffer),l=f[3]===128;function o(i,d,g){h[0]=i,d[g]=f[0],d[g+1]=f[1],d[g+2]=f[2],d[g+3]=f[3]}function t(i,d,g){h[0]=i,d[g]=f[3],d[g+1]=f[2],d[g+2]=f[1],d[g+3]=f[0]}function e(i,d){return f[0]=i[d],f[1]=i[d+1],f[2]=i[d+2],f[3]=i[d+3],h[0]}function r(i,d){return f[3]=i[d],f[2]=i[d+1],f[1]=i[d+2],f[0]=i[d+3],h[0]}s.writeFloatLE=l?o:t,s.writeFloatBE=l?t:o,s.readFloatLE=l?e:r,s.readFloatBE=l?r:e}():function(){function h(l,o,t,e){var r=o<0?1:0;if(r&&(o=-o),o===0)l(1/o>0?0:2147483648,t,e);else if(isNaN(o))l(2143289344,t,e);else if(o>34028234663852886e22)l((r<<31|2139095040)>>>0,t,e);else if(o<11754943508222875e-54)l((r<<31|Math.round(o/1401298464324817e-60))>>>0,t,e);else{var i=Math.floor(Math.log(o)/Math.LN2);l((r<<31|i+127<<23|8388607&Math.round(o*Math.pow(2,-i)*8388608))>>>0,t,e)}}function f(l,o,t){var e=l(o,t),r=2*(e>>31)+1,i=e>>>23&255,d=8388607&e;return i===255?d?NaN:r*(1/0):i===0?1401298464324817e-60*r*d:r*Math.pow(2,i-150)*(d+8388608)}s.writeFloatLE=h.bind(null,a),s.writeFloatBE=h.bind(null,u),s.readFloatLE=f.bind(null,c),s.readFloatBE=f.bind(null,p)}(),typeof Float64Array<"u"?function(){var h=new Float64Array([-0]),f=new Uint8Array(h.buffer),l=f[7]===128;function o(i,d,g){h[0]=i,d[g]=f[0],d[g+1]=f[1],d[g+2]=f[2],d[g+3]=f[3],d[g+4]=f[4],d[g+5]=f[5],d[g+6]=f[6],d[g+7]=f[7]}function t(i,d,g){h[0]=i,d[g]=f[7],d[g+1]=f[6],d[g+2]=f[5],d[g+3]=f[4],d[g+4]=f[3],d[g+5]=f[2],d[g+6]=f[1],d[g+7]=f[0]}function e(i,d){return f[0]=i[d],f[1]=i[d+1],f[2]=i[d+2],f[3]=i[d+3],f[4]=i[d+4],f[5]=i[d+5],f[6]=i[d+6],f[7]=i[d+7],h[0]}function r(i,d){return f[7]=i[d],f[6]=i[d+1],f[5]=i[d+2],f[4]=i[d+3],f[3]=i[d+4],f[2]=i[d+5],f[1]=i[d+6],f[0]=i[d+7],h[0]}s.writeDoubleLE=l?o:t,s.writeDoubleBE=l?t:o,s.readDoubleLE=l?e:r,s.readDoubleBE=l?r:e}():function(){function h(l,o,t,e,r,i){var d=e<0?1:0;if(d&&(e=-e),e===0)l(0,r,i+o),l(1/e>0?0:2147483648,r,i+t);else if(isNaN(e))l(0,r,i+o),l(2146959360,r,i+t);else if(e>17976931348623157e292)l(0,r,i+o),l((d<<31|2146435072)>>>0,r,i+t);else{var g;if(e<22250738585072014e-324)l((g=e/5e-324)>>>0,r,i+o),l((d<<31|g/4294967296)>>>0,r,i+t);else{var m=Math.floor(Math.log(e)/Math.LN2);m===1024&&(m=1023),l(4503599627370496*(g=e*Math.pow(2,-m))>>>0,r,i+o),l((d<<31|m+1023<<20|1048576*g&1048575)>>>0,r,i+t)}}}function f(l,o,t,e,r){var i=l(e,r+o),d=l(e,r+t),g=2*(d>>31)+1,m=d>>>20&2047,b=4294967296*(1048575&d)+i;return m===2047?b?NaN:g*(1/0):m===0?5e-324*g*b:g*Math.pow(2,m-1075)*(b+4503599627370496)}s.writeDoubleLE=h.bind(null,a,0,4),s.writeDoubleBE=h.bind(null,u,4,0),s.readDoubleLE=f.bind(null,c,0,4),s.readDoubleBE=f.bind(null,p,4,0)}(),s}function a(s,h,f){h[f]=255&s,h[f+1]=s>>>8&255,h[f+2]=s>>>16&255,h[f+3]=s>>>24}function u(s,h,f){h[f]=s>>>24,h[f+1]=s>>>16&255,h[f+2]=s>>>8&255,h[f+3]=255&s}function c(s,h){return(s[h]|s[h+1]<<8|s[h+2]<<16|s[h+3]<<24)>>>0}function p(s,h){return(s[h]<<24|s[h+1]<<16|s[h+2]<<8|s[h+3])>>>0}y.exports=n(n)},7199:module=>{function inquire(moduleName){try{var mod=eval("quire".replace(/^/,"re"))(moduleName);if(mod&&(mod.length||Object.keys(mod).length))return mod}catch(y){}return null}module.exports=inquire},6662:y=>{y.exports=function(n,a,u){var c=u||8192,p=c>>>1,s=null,h=c;return function(f){if(f<1||f>p)return n(f);h+f>c&&(s=n(c),h=0);var l=a.call(s,h,h+=f);return 7&h&&(h=1+(7|h)),l}}},4997:(y,n)=>{var a=n;a.length=function(u){for(var 
c=0,p=0,s=0;s191&&s<224?f[l++]=(31&s)<<6|63&u[c++]:s>239&&s<365?(s=((7&s)<<18|(63&u[c++])<<12|(63&u[c++])<<6|63&u[c++])-65536,f[l++]=55296+(s>>10),f[l++]=56320+(1023&s)):f[l++]=(15&s)<<12|(63&u[c++])<<6|63&u[c++],l>8191&&((h||(h=[])).push(String.fromCharCode.apply(String,f)),l=0);return h?(l&&h.push(String.fromCharCode.apply(String,f.slice(0,l))),h.join("")):String.fromCharCode.apply(String,f.slice(0,l))},a.write=function(u,c,p){for(var s,h,f=p,l=0;l>6|192,c[p++]=63&s|128):(64512&s)==55296&&(64512&(h=u.charCodeAt(l+1)))==56320?(s=65536+((1023&s)<<10)+(1023&h),++l,c[p++]=s>>18|240,c[p++]=s>>12&63|128,c[p++]=s>>6&63|128,c[p++]=63&s|128):(c[p++]=s>>12|224,c[p++]=s>>6&63|128,c[p++]=63&s|128);return p-f}},3442:(y,n)=>{n.__esModule=!0;var a=function(){function u(c){if(!c)throw new TypeError("Invalid argument; `value` has no value.");this.value=u.EMPTY,c&&u.isGuid(c)&&(this.value=c)}return u.isGuid=function(c){var p=c.toString();return c&&(c instanceof u||u.validator.test(p))},u.create=function(){return new u([u.gen(2),u.gen(1),u.gen(1),u.gen(1),u.gen(3)].join("-"))},u.createEmpty=function(){return new u("emptyguid")},u.parse=function(c){return new u(c)},u.raw=function(){return[u.gen(2),u.gen(1),u.gen(1),u.gen(1),u.gen(3)].join("-")},u.gen=function(c){for(var p="",s=0;s{y.exports=a;var n=null;try{n=new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([0,97,115,109,1,0,0,0,1,13,2,96,0,1,127,96,4,127,127,127,127,1,127,3,7,6,0,1,1,1,1,1,6,6,1,127,1,65,0,11,7,50,6,3,109,117,108,0,1,5,100,105,118,95,115,0,2,5,100,105,118,95,117,0,3,5,114,101,109,95,115,0,4,5,114,101,109,95,117,0,5,8,103,101,116,95,104,105,103,104,0,0,10,191,1,6,4,0,35,0,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,126,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,127,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,128,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,129,34,4,66,32,135,167,36,0,32,4,167,11,36,1,1,126,32,0,173,32,1,173,66,32,134,132,32,2,173,32,3,173,66,32,134,132,130,34,4,66,32,135,167,36,0,32,4,167,11])),{}).exports}catch{}function a(x,I,N){this.low=0|x,this.high=0|I,this.unsigned=!!N}function u(x){return(x&&x.__isLong__)===!0}a.prototype.__isLong__,Object.defineProperty(a.prototype,"__isLong__",{value:!0}),a.isLong=u;var c={},p={};function s(x,I){var N,B,L;return I?(L=0<=(x>>>=0)&&x<256)&&(B=p[x])?B:(N=f(x,(0|x)<0?-1:0,!0),L&&(p[x]=N),N):(L=-128<=(x|=0)&&x<128)&&(B=c[x])?B:(N=f(x,x<0?-1:0,!1),L&&(c[x]=N),N)}function h(x,I){if(isNaN(x))return I?m:g;if(I){if(x<0)return m;if(x>=r)return S}else{if(x<=-i)return A;if(x+1>=i)return w}return x<0?h(-x,I).neg():f(x%e|0,x/e|0,I)}function f(x,I,N){return new a(x,I,N)}a.fromInt=s,a.fromNumber=h,a.fromBits=f;var l=Math.pow;function o(x,I,N){if(x.length===0)throw Error("empty string");if(x==="NaN"||x==="Infinity"||x==="+Infinity"||x==="-Infinity")return g;if(typeof I=="number"?(N=I,I=!1):I=!!I,(N=N||10)<2||360)throw Error("interior hyphen");if(B===0)return o(x.substring(1),I,N).neg();for(var L=h(l(N,8)),F=g,H=0;H>>0:this.low},O.toNumber=function(){return this.unsigned?(this.high>>>0)*e+(this.low>>>0):this.high*e+(this.low>>>0)},O.toString=function(x){if((x=x||10)<2||36>>0).toString(x);if((F=D).isZero())return j+H;for(;j.length<6;)j="0"+j;H=""+j+H}},O.getHighBits=function(){return this.high},O.getHighBitsUnsigned=function(){return 
this.high>>>0},O.getLowBits=function(){return this.low},O.getLowBitsUnsigned=function(){return this.low>>>0},O.getNumBitsAbs=function(){if(this.isNegative())return this.eq(A)?64:this.neg().getNumBitsAbs();for(var x=this.high!=0?this.high:this.low,I=31;I>0&&!(x&1<=0},O.isOdd=function(){return(1&this.low)==1},O.isEven=function(){return(1&this.low)==0},O.equals=function(x){return u(x)||(x=t(x)),(this.unsigned===x.unsigned||this.high>>>31!=1||x.high>>>31!=1)&&this.high===x.high&&this.low===x.low},O.eq=O.equals,O.notEquals=function(x){return!this.eq(x)},O.neq=O.notEquals,O.ne=O.notEquals,O.lessThan=function(x){return this.comp(x)<0},O.lt=O.lessThan,O.lessThanOrEqual=function(x){return this.comp(x)<=0},O.lte=O.lessThanOrEqual,O.le=O.lessThanOrEqual,O.greaterThan=function(x){return this.comp(x)>0},O.gt=O.greaterThan,O.greaterThanOrEqual=function(x){return this.comp(x)>=0},O.gte=O.greaterThanOrEqual,O.ge=O.greaterThanOrEqual,O.compare=function(x){if(u(x)||(x=t(x)),this.eq(x))return 0;var I=this.isNegative(),N=x.isNegative();return I&&!N?-1:!I&&N?1:this.unsigned?x.high>>>0>this.high>>>0||x.high===this.high&&x.low>>>0>this.low>>>0?-1:1:this.sub(x).isNegative()?-1:1},O.comp=O.compare,O.negate=function(){return!this.unsigned&&this.eq(A)?A:this.not().add(b)},O.neg=O.negate,O.add=function(x){u(x)||(x=t(x));var I=this.high>>>16,N=65535&this.high,B=this.low>>>16,L=65535&this.low,F=x.high>>>16,H=65535&x.high,D=x.low>>>16,j=0,Z=0,X=0,J=0;return X+=(J+=L+(65535&x.low))>>>16,Z+=(X+=B+D)>>>16,j+=(Z+=N+H)>>>16,j+=I+F,f((X&=65535)<<16|(J&=65535),(j&=65535)<<16|(Z&=65535),this.unsigned)},O.subtract=function(x){return u(x)||(x=t(x)),this.add(x.neg())},O.sub=O.subtract,O.multiply=function(x){if(this.isZero())return g;if(u(x)||(x=t(x)),n)return f(n.mul(this.low,this.high,x.low,x.high),n.get_high(),this.unsigned);if(x.isZero())return g;if(this.eq(A))return x.isOdd()?A:g;if(x.eq(A))return this.isOdd()?A:g;if(this.isNegative())return x.isNegative()?this.neg().mul(x.neg()):this.neg().mul(x).neg();if(x.isNegative())return this.mul(x.neg()).neg();if(this.lt(d)&&x.lt(d))return h(this.toNumber()*x.toNumber(),this.unsigned);var I=this.high>>>16,N=65535&this.high,B=this.low>>>16,L=65535&this.low,F=x.high>>>16,H=65535&x.high,D=x.low>>>16,j=65535&x.low,Z=0,X=0,J=0,ee=0;return J+=(ee+=L*j)>>>16,X+=(J+=B*j)>>>16,J&=65535,X+=(J+=L*D)>>>16,Z+=(X+=N*j)>>>16,X&=65535,Z+=(X+=B*D)>>>16,X&=65535,Z+=(X+=L*H)>>>16,Z+=I*j+N*D+B*H+L*F,f((J&=65535)<<16|(ee&=65535),(Z&=65535)<<16|(X&=65535),this.unsigned)},O.mul=O.multiply,O.divide=function(x){if(u(x)||(x=t(x)),x.isZero())throw Error("division by zero");var I,N,B;if(n)return this.unsigned||this.high!==-2147483648||x.low!==-1||x.high!==-1?f((this.unsigned?n.div_u:n.div_s)(this.low,this.high,x.low,x.high),n.get_high(),this.unsigned):this;if(this.isZero())return this.unsigned?m:g;if(this.unsigned){if(x.unsigned||(x=x.toUnsigned()),x.gt(this))return m;if(x.gt(this.shru(1)))return _;B=m}else{if(this.eq(A))return x.eq(b)||x.eq(v)?A:x.eq(A)?b:(I=this.shr(1).div(x).shl(1)).eq(g)?x.isNegative()?b:v:(N=this.sub(x.mul(I)),B=I.add(N.div(x)));if(x.eq(A))return this.unsigned?m:g;if(this.isNegative())return x.isNegative()?this.neg().div(x.neg()):this.neg().div(x).neg();if(x.isNegative())return this.div(x.neg()).neg();B=g}for(N=this;N.gte(x);){I=Math.max(1,Math.floor(N.toNumber()/x.toNumber()));for(var L=Math.ceil(Math.log(I)/Math.LN2),F=L<=48?1:l(2,L-48),H=h(I),D=H.mul(x);D.isNegative()||D.gt(N);)D=(H=h(I-=F,this.unsigned)).mul(x);H.isZero()&&(H=b),B=B.add(H),N=N.sub(D)}return 
B},O.div=O.divide,O.modulo=function(x){return u(x)||(x=t(x)),n?f((this.unsigned?n.rem_u:n.rem_s)(this.low,this.high,x.low,x.high),n.get_high(),this.unsigned):this.sub(this.div(x).mul(x))},O.mod=O.modulo,O.rem=O.modulo,O.not=function(){return f(~this.low,~this.high,this.unsigned)},O.and=function(x){return u(x)||(x=t(x)),f(this.low&x.low,this.high&x.high,this.unsigned)},O.or=function(x){return u(x)||(x=t(x)),f(this.low|x.low,this.high|x.high,this.unsigned)},O.xor=function(x){return u(x)||(x=t(x)),f(this.low^x.low,this.high^x.high,this.unsigned)},O.shiftLeft=function(x){return u(x)&&(x=x.toInt()),(x&=63)==0?this:x<32?f(this.low<>>32-x,this.unsigned):f(0,this.low<>>x|this.high<<32-x,this.high>>x,this.unsigned):f(this.high>>x-32,this.high>=0?0:-1,this.unsigned)},O.shr=O.shiftRight,O.shiftRightUnsigned=function(x){if(u(x)&&(x=x.toInt()),(x&=63)==0)return this;var I=this.high;return x<32?f(this.low>>>x|I<<32-x,I>>>x,this.unsigned):f(x===32?I:I>>>x-32,0,this.unsigned)},O.shru=O.shiftRightUnsigned,O.shr_u=O.shiftRightUnsigned,O.toSigned=function(){return this.unsigned?f(this.low,this.high,!1):this},O.toUnsigned=function(){return this.unsigned?this:f(this.low,this.high,!0)},O.toBytes=function(x){return x?this.toBytesLE():this.toBytesBE()},O.toBytesLE=function(){var x=this.high,I=this.low;return[255&I,I>>>8&255,I>>>16&255,I>>>24,255&x,x>>>8&255,x>>>16&255,x>>>24]},O.toBytesBE=function(){var x=this.high,I=this.low;return[x>>>24,x>>>16&255,x>>>8&255,255&x,I>>>24,I>>>16&255,I>>>8&255,255&I]},a.fromBytes=function(x,I,N){return N?a.fromBytesLE(x,I):a.fromBytesBE(x,I)},a.fromBytesLE=function(x,I){return new a(x[0]|x[1]<<8|x[2]<<16|x[3]<<24,x[4]|x[5]<<8|x[6]<<16|x[7]<<24,I)},a.fromBytesBE=function(x,I){return new a(x[4]<<24|x[5]<<16|x[6]<<8|x[7],x[0]<<24|x[1]<<16|x[2]<<8|x[3],I)}},1446:(y,n,a)=>{var u,c,p,s=a(2100),h=s.Reader,f=s.Writer,l=s.util,o=s.roots.default||(s.roots.default={});o.onnx=((p={}).Version=(u={},(c=Object.create(u))[u[0]="_START_VERSION"]=0,c[u[1]="IR_VERSION_2017_10_10"]=1,c[u[2]="IR_VERSION_2017_10_30"]=2,c[u[3]="IR_VERSION_2017_11_3"]=3,c[u[4]="IR_VERSION_2019_1_22"]=4,c[u[5]="IR_VERSION"]=5,c),p.AttributeProto=function(){function t(e){if(this.floats=[],this.ints=[],this.strings=[],this.tensors=[],this.graphs=[],e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.name=e.string();break;case 21:d.refAttrName=e.string();break;case 13:d.docString=e.string();break;case 20:d.type=e.int32();break;case 2:d.f=e.float();break;case 3:d.i=e.int64();break;case 4:d.s=e.bytes();break;case 5:d.t=o.onnx.TensorProto.decode(e,e.uint32());break;case 6:d.g=o.onnx.GraphProto.decode(e,e.uint32());break;case 7:if(d.floats&&d.floats.length||(d.floats=[]),(7&g)==2)for(var m=e.uint32()+e.pos;e.pos>>0,e.i.high>>>0).toNumber())),e.s!=null&&(typeof e.s=="string"?l.base64.decode(e.s,r.s=l.newBuffer(l.base64.length(e.s)),0):e.s.length&&(r.s=e.s)),e.t!=null){if(typeof e.t!="object")throw TypeError(".onnx.AttributeProto.t: object expected");r.t=o.onnx.TensorProto.fromObject(e.t)}if(e.g!=null){if(typeof e.g!="object")throw TypeError(".onnx.AttributeProto.g: object expected");r.g=o.onnx.GraphProto.fromObject(e.g)}if(e.floats){if(!Array.isArray(e.floats))throw TypeError(".onnx.AttributeProto.floats: array expected");r.floats=[];for(var i=0;i>>0,e.ints[i].high>>>0).toNumber())}if(e.strings){if(!Array.isArray(e.strings))throw TypeError(".onnx.AttributeProto.strings: array 
expected");for(r.strings=[],i=0;i>>0,e.i.high>>>0).toNumber():e.i),e.s!=null&&e.hasOwnProperty("s")&&(i.s=r.bytes===String?l.base64.encode(e.s,0,e.s.length):r.bytes===Array?Array.prototype.slice.call(e.s):e.s),e.t!=null&&e.hasOwnProperty("t")&&(i.t=o.onnx.TensorProto.toObject(e.t,r)),e.g!=null&&e.hasOwnProperty("g")&&(i.g=o.onnx.GraphProto.toObject(e.g,r)),e.floats&&e.floats.length){i.floats=[];for(var g=0;g>>0,e.ints[g].high>>>0).toNumber():e.ints[g];if(e.strings&&e.strings.length)for(i.strings=[],g=0;g>>3){case 1:d.name=e.string();break;case 2:d.type=o.onnx.TypeProto.decode(e,e.uint32());break;case 3:d.docString=e.string();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.name!=null&&e.hasOwnProperty("name")&&!l.isString(e.name))return"name: string expected";if(e.type!=null&&e.hasOwnProperty("type")){var r=o.onnx.TypeProto.verify(e.type);if(r)return"type."+r}return e.docString!=null&&e.hasOwnProperty("docString")&&!l.isString(e.docString)?"docString: string expected":null},t.fromObject=function(e){if(e instanceof o.onnx.ValueInfoProto)return e;var r=new o.onnx.ValueInfoProto;if(e.name!=null&&(r.name=String(e.name)),e.type!=null){if(typeof e.type!="object")throw TypeError(".onnx.ValueInfoProto.type: object expected");r.type=o.onnx.TypeProto.fromObject(e.type)}return e.docString!=null&&(r.docString=String(e.docString)),r},t.toObject=function(e,r){r||(r={});var i={};return r.defaults&&(i.name="",i.type=null,i.docString=""),e.name!=null&&e.hasOwnProperty("name")&&(i.name=e.name),e.type!=null&&e.hasOwnProperty("type")&&(i.type=o.onnx.TypeProto.toObject(e.type,r)),e.docString!=null&&e.hasOwnProperty("docString")&&(i.docString=e.docString),i},t.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},t}(),p.NodeProto=function(){function t(e){if(this.input=[],this.output=[],this.attribute=[],e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.input&&d.input.length||(d.input=[]),d.input.push(e.string());break;case 2:d.output&&d.output.length||(d.output=[]),d.output.push(e.string());break;case 3:d.name=e.string();break;case 4:d.opType=e.string();break;case 7:d.domain=e.string();break;case 5:d.attribute&&d.attribute.length||(d.attribute=[]),d.attribute.push(o.onnx.AttributeProto.decode(e,e.uint32()));break;case 6:d.docString=e.string();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.input!=null&&e.hasOwnProperty("input")){if(!Array.isArray(e.input))return"input: array expected";for(var r=0;r>>3){case 1:d.irVersion=e.int64();break;case 8:d.opsetImport&&d.opsetImport.length||(d.opsetImport=[]),d.opsetImport.push(o.onnx.OperatorSetIdProto.decode(e,e.uint32()));break;case 2:d.producerName=e.string();break;case 3:d.producerVersion=e.string();break;case 4:d.domain=e.string();break;case 5:d.modelVersion=e.int64();break;case 6:d.docString=e.string();break;case 7:d.graph=o.onnx.GraphProto.decode(e,e.uint32());break;case 14:d.metadataProps&&d.metadataProps.length||(d.metadataProps=[]),d.metadataProps.push(o.onnx.StringStringEntryProto.decode(e,e.uint32()));break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object 
expected";if(e.irVersion!=null&&e.hasOwnProperty("irVersion")&&!(l.isInteger(e.irVersion)||e.irVersion&&l.isInteger(e.irVersion.low)&&l.isInteger(e.irVersion.high)))return"irVersion: integer|Long expected";if(e.opsetImport!=null&&e.hasOwnProperty("opsetImport")){if(!Array.isArray(e.opsetImport))return"opsetImport: array expected";for(var r=0;r>>0,e.irVersion.high>>>0).toNumber())),e.opsetImport){if(!Array.isArray(e.opsetImport))throw TypeError(".onnx.ModelProto.opsetImport: array expected");r.opsetImport=[];for(var i=0;i>>0,e.modelVersion.high>>>0).toNumber())),e.docString!=null&&(r.docString=String(e.docString)),e.graph!=null){if(typeof e.graph!="object")throw TypeError(".onnx.ModelProto.graph: object expected");r.graph=o.onnx.GraphProto.fromObject(e.graph)}if(e.metadataProps){if(!Array.isArray(e.metadataProps))throw TypeError(".onnx.ModelProto.metadataProps: array expected");for(r.metadataProps=[],i=0;i>>0,e.irVersion.high>>>0).toNumber():e.irVersion),e.producerName!=null&&e.hasOwnProperty("producerName")&&(i.producerName=e.producerName),e.producerVersion!=null&&e.hasOwnProperty("producerVersion")&&(i.producerVersion=e.producerVersion),e.domain!=null&&e.hasOwnProperty("domain")&&(i.domain=e.domain),e.modelVersion!=null&&e.hasOwnProperty("modelVersion")&&(typeof e.modelVersion=="number"?i.modelVersion=r.longs===String?String(e.modelVersion):e.modelVersion:i.modelVersion=r.longs===String?l.Long.prototype.toString.call(e.modelVersion):r.longs===Number?new l.LongBits(e.modelVersion.low>>>0,e.modelVersion.high>>>0).toNumber():e.modelVersion),e.docString!=null&&e.hasOwnProperty("docString")&&(i.docString=e.docString),e.graph!=null&&e.hasOwnProperty("graph")&&(i.graph=o.onnx.GraphProto.toObject(e.graph,r)),e.opsetImport&&e.opsetImport.length){i.opsetImport=[];for(var g=0;g>>3){case 1:d.key=e.string();break;case 2:d.value=e.string();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){return typeof e!="object"||e===null?"object expected":e.key!=null&&e.hasOwnProperty("key")&&!l.isString(e.key)?"key: string expected":e.value!=null&&e.hasOwnProperty("value")&&!l.isString(e.value)?"value: string expected":null},t.fromObject=function(e){if(e instanceof o.onnx.StringStringEntryProto)return e;var r=new o.onnx.StringStringEntryProto;return e.key!=null&&(r.key=String(e.key)),e.value!=null&&(r.value=String(e.value)),r},t.toObject=function(e,r){r||(r={});var i={};return r.defaults&&(i.key="",i.value=""),e.key!=null&&e.hasOwnProperty("key")&&(i.key=e.key),e.value!=null&&e.hasOwnProperty("value")&&(i.value=e.value),i},t.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},t}(),p.TensorAnnotation=function(){function t(e){if(this.quantParameterTensorNames=[],e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.tensorName=e.string();break;case 2:d.quantParameterTensorNames&&d.quantParameterTensorNames.length||(d.quantParameterTensorNames=[]),d.quantParameterTensorNames.push(o.onnx.StringStringEntryProto.decode(e,e.uint32()));break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.tensorName!=null&&e.hasOwnProperty("tensorName")&&!l.isString(e.tensorName))return"tensorName: string 
expected";if(e.quantParameterTensorNames!=null&&e.hasOwnProperty("quantParameterTensorNames")){if(!Array.isArray(e.quantParameterTensorNames))return"quantParameterTensorNames: array expected";for(var r=0;r>>3){case 1:d.node&&d.node.length||(d.node=[]),d.node.push(o.onnx.NodeProto.decode(e,e.uint32()));break;case 2:d.name=e.string();break;case 5:d.initializer&&d.initializer.length||(d.initializer=[]),d.initializer.push(o.onnx.TensorProto.decode(e,e.uint32()));break;case 10:d.docString=e.string();break;case 11:d.input&&d.input.length||(d.input=[]),d.input.push(o.onnx.ValueInfoProto.decode(e,e.uint32()));break;case 12:d.output&&d.output.length||(d.output=[]),d.output.push(o.onnx.ValueInfoProto.decode(e,e.uint32()));break;case 13:d.valueInfo&&d.valueInfo.length||(d.valueInfo=[]),d.valueInfo.push(o.onnx.ValueInfoProto.decode(e,e.uint32()));break;case 14:d.quantizationAnnotation&&d.quantizationAnnotation.length||(d.quantizationAnnotation=[]),d.quantizationAnnotation.push(o.onnx.TensorAnnotation.decode(e,e.uint32()));break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object expected";if(e.node!=null&&e.hasOwnProperty("node")){if(!Array.isArray(e.node))return"node: array expected";for(var r=0;r>>3){case 1:if(d.dims&&d.dims.length||(d.dims=[]),(7&g)==2)for(var m=e.uint32()+e.pos;e.pos>>0,e.dims[i].high>>>0).toNumber())}if(e.dataType!=null&&(r.dataType=0|e.dataType),e.segment!=null){if(typeof e.segment!="object")throw TypeError(".onnx.TensorProto.segment: object expected");r.segment=o.onnx.TensorProto.Segment.fromObject(e.segment)}if(e.floatData){if(!Array.isArray(e.floatData))throw TypeError(".onnx.TensorProto.floatData: array expected");for(r.floatData=[],i=0;i>>0,e.int64Data[i].high>>>0).toNumber())}if(e.name!=null&&(r.name=String(e.name)),e.docString!=null&&(r.docString=String(e.docString)),e.rawData!=null&&(typeof e.rawData=="string"?l.base64.decode(e.rawData,r.rawData=l.newBuffer(l.base64.length(e.rawData)),0):e.rawData.length&&(r.rawData=e.rawData)),e.externalData){if(!Array.isArray(e.externalData))throw TypeError(".onnx.TensorProto.externalData: array expected");for(r.externalData=[],i=0;i>>0,e.uint64Data[i].high>>>0).toNumber(!0))}return r},t.toObject=function(e,r){r||(r={});var i={};if((r.arrays||r.defaults)&&(i.dims=[],i.floatData=[],i.int32Data=[],i.stringData=[],i.int64Data=[],i.doubleData=[],i.uint64Data=[],i.externalData=[]),r.defaults&&(i.dataType=0,i.segment=null,i.name="",r.bytes===String?i.rawData="":(i.rawData=[],r.bytes!==Array&&(i.rawData=l.newBuffer(i.rawData))),i.docString="",i.dataLocation=r.enums===String?"DEFAULT":0),e.dims&&e.dims.length){i.dims=[];for(var 
d=0;d>>0,e.dims[d].high>>>0).toNumber():e.dims[d]}if(e.dataType!=null&&e.hasOwnProperty("dataType")&&(i.dataType=e.dataType),e.segment!=null&&e.hasOwnProperty("segment")&&(i.segment=o.onnx.TensorProto.Segment.toObject(e.segment,r)),e.floatData&&e.floatData.length)for(i.floatData=[],d=0;d>>0,e.int64Data[d].high>>>0).toNumber():e.int64Data[d];if(e.name!=null&&e.hasOwnProperty("name")&&(i.name=e.name),e.rawData!=null&&e.hasOwnProperty("rawData")&&(i.rawData=r.bytes===String?l.base64.encode(e.rawData,0,e.rawData.length):r.bytes===Array?Array.prototype.slice.call(e.rawData):e.rawData),e.doubleData&&e.doubleData.length)for(i.doubleData=[],d=0;d>>0,e.uint64Data[d].high>>>0).toNumber(!0):e.uint64Data[d];if(e.docString!=null&&e.hasOwnProperty("docString")&&(i.docString=e.docString),e.externalData&&e.externalData.length)for(i.externalData=[],d=0;d>>3){case 1:g.begin=r.int64();break;case 2:g.end=r.int64();break;default:r.skipType(7&m)}}return g},e.decodeDelimited=function(r){return r instanceof h||(r=new h(r)),this.decode(r,r.uint32())},e.verify=function(r){return typeof r!="object"||r===null?"object expected":r.begin!=null&&r.hasOwnProperty("begin")&&!(l.isInteger(r.begin)||r.begin&&l.isInteger(r.begin.low)&&l.isInteger(r.begin.high))?"begin: integer|Long expected":r.end!=null&&r.hasOwnProperty("end")&&!(l.isInteger(r.end)||r.end&&l.isInteger(r.end.low)&&l.isInteger(r.end.high))?"end: integer|Long expected":null},e.fromObject=function(r){if(r instanceof o.onnx.TensorProto.Segment)return r;var i=new o.onnx.TensorProto.Segment;return r.begin!=null&&(l.Long?(i.begin=l.Long.fromValue(r.begin)).unsigned=!1:typeof r.begin=="string"?i.begin=parseInt(r.begin,10):typeof r.begin=="number"?i.begin=r.begin:typeof r.begin=="object"&&(i.begin=new l.LongBits(r.begin.low>>>0,r.begin.high>>>0).toNumber())),r.end!=null&&(l.Long?(i.end=l.Long.fromValue(r.end)).unsigned=!1:typeof r.end=="string"?i.end=parseInt(r.end,10):typeof r.end=="number"?i.end=r.end:typeof r.end=="object"&&(i.end=new l.LongBits(r.end.low>>>0,r.end.high>>>0).toNumber())),i},e.toObject=function(r,i){i||(i={});var d={};if(i.defaults){if(l.Long){var g=new l.Long(0,0,!1);d.begin=i.longs===String?g.toString():i.longs===Number?g.toNumber():g}else d.begin=i.longs===String?"0":0;l.Long?(g=new l.Long(0,0,!1),d.end=i.longs===String?g.toString():i.longs===Number?g.toNumber():g):d.end=i.longs===String?"0":0}return r.begin!=null&&r.hasOwnProperty("begin")&&(typeof r.begin=="number"?d.begin=i.longs===String?String(r.begin):r.begin:d.begin=i.longs===String?l.Long.prototype.toString.call(r.begin):i.longs===Number?new l.LongBits(r.begin.low>>>0,r.begin.high>>>0).toNumber():r.begin),r.end!=null&&r.hasOwnProperty("end")&&(typeof r.end=="number"?d.end=i.longs===String?String(r.end):r.end:d.end=i.longs===String?l.Long.prototype.toString.call(r.end):i.longs===Number?new l.LongBits(r.end.low>>>0,r.end.high>>>0).toNumber():r.end),d},e.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},e}(),t.DataLocation=function(){var e={},r=Object.create(e);return r[e[0]="DEFAULT"]=0,r[e[1]="EXTERNAL"]=1,r}(),t}(),p.TensorShapeProto=function(){function t(e){if(this.dim=[],e)for(var r=Object.keys(e),i=0;i>>3==1?(d.dim&&d.dim.length||(d.dim=[]),d.dim.push(o.onnx.TensorShapeProto.Dimension.decode(e,e.uint32()))):e.skipType(7&g)}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){if(typeof e!="object"||e===null)return"object 
expected";if(e.dim!=null&&e.hasOwnProperty("dim")){if(!Array.isArray(e.dim))return"dim: array expected";for(var r=0;r>>3){case 1:m.dimValue=i.int64();break;case 2:m.dimParam=i.string();break;case 3:m.denotation=i.string();break;default:i.skipType(7&b)}}return m},e.decodeDelimited=function(i){return i instanceof h||(i=new h(i)),this.decode(i,i.uint32())},e.verify=function(i){if(typeof i!="object"||i===null)return"object expected";var d={};if(i.dimValue!=null&&i.hasOwnProperty("dimValue")&&(d.value=1,!(l.isInteger(i.dimValue)||i.dimValue&&l.isInteger(i.dimValue.low)&&l.isInteger(i.dimValue.high))))return"dimValue: integer|Long expected";if(i.dimParam!=null&&i.hasOwnProperty("dimParam")){if(d.value===1)return"value: multiple values";if(d.value=1,!l.isString(i.dimParam))return"dimParam: string expected"}return i.denotation!=null&&i.hasOwnProperty("denotation")&&!l.isString(i.denotation)?"denotation: string expected":null},e.fromObject=function(i){if(i instanceof o.onnx.TensorShapeProto.Dimension)return i;var d=new o.onnx.TensorShapeProto.Dimension;return i.dimValue!=null&&(l.Long?(d.dimValue=l.Long.fromValue(i.dimValue)).unsigned=!1:typeof i.dimValue=="string"?d.dimValue=parseInt(i.dimValue,10):typeof i.dimValue=="number"?d.dimValue=i.dimValue:typeof i.dimValue=="object"&&(d.dimValue=new l.LongBits(i.dimValue.low>>>0,i.dimValue.high>>>0).toNumber())),i.dimParam!=null&&(d.dimParam=String(i.dimParam)),i.denotation!=null&&(d.denotation=String(i.denotation)),d},e.toObject=function(i,d){d||(d={});var g={};return d.defaults&&(g.denotation=""),i.dimValue!=null&&i.hasOwnProperty("dimValue")&&(typeof i.dimValue=="number"?g.dimValue=d.longs===String?String(i.dimValue):i.dimValue:g.dimValue=d.longs===String?l.Long.prototype.toString.call(i.dimValue):d.longs===Number?new l.LongBits(i.dimValue.low>>>0,i.dimValue.high>>>0).toNumber():i.dimValue,d.oneofs&&(g.value="dimValue")),i.dimParam!=null&&i.hasOwnProperty("dimParam")&&(g.dimParam=i.dimParam,d.oneofs&&(g.value="dimParam")),i.denotation!=null&&i.hasOwnProperty("denotation")&&(g.denotation=i.denotation),g},e.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},e}(),t}(),p.TypeProto=function(){function t(r){if(r)for(var i=Object.keys(r),d=0;d>>3){case 1:g.tensorType=o.onnx.TypeProto.Tensor.decode(r,r.uint32());break;case 6:g.denotation=r.string();break;default:r.skipType(7&m)}}return g},t.decodeDelimited=function(r){return r instanceof h||(r=new h(r)),this.decode(r,r.uint32())},t.verify=function(r){if(typeof r!="object"||r===null)return"object expected";if(r.tensorType!=null&&r.hasOwnProperty("tensorType")){var i=o.onnx.TypeProto.Tensor.verify(r.tensorType);if(i)return"tensorType."+i}return r.denotation!=null&&r.hasOwnProperty("denotation")&&!l.isString(r.denotation)?"denotation: string expected":null},t.fromObject=function(r){if(r instanceof o.onnx.TypeProto)return r;var i=new o.onnx.TypeProto;if(r.tensorType!=null){if(typeof r.tensorType!="object")throw TypeError(".onnx.TypeProto.tensorType: object expected");i.tensorType=o.onnx.TypeProto.Tensor.fromObject(r.tensorType)}return r.denotation!=null&&(i.denotation=String(r.denotation)),i},t.toObject=function(r,i){i||(i={});var d={};return i.defaults&&(d.denotation=""),r.tensorType!=null&&r.hasOwnProperty("tensorType")&&(d.tensorType=o.onnx.TypeProto.Tensor.toObject(r.tensorType,i),i.oneofs&&(d.value="tensorType")),r.denotation!=null&&r.hasOwnProperty("denotation")&&(d.denotation=r.denotation),d},t.prototype.toJSON=function(){return 
this.constructor.toObject(this,s.util.toJSONOptions)},t.Tensor=function(){function r(i){if(i)for(var d=Object.keys(i),g=0;g>>3){case 1:m.elemType=i.int32();break;case 2:m.shape=o.onnx.TensorShapeProto.decode(i,i.uint32());break;default:i.skipType(7&b)}}return m},r.decodeDelimited=function(i){return i instanceof h||(i=new h(i)),this.decode(i,i.uint32())},r.verify=function(i){if(typeof i!="object"||i===null)return"object expected";if(i.elemType!=null&&i.hasOwnProperty("elemType")&&!l.isInteger(i.elemType))return"elemType: integer expected";if(i.shape!=null&&i.hasOwnProperty("shape")){var d=o.onnx.TensorShapeProto.verify(i.shape);if(d)return"shape."+d}return null},r.fromObject=function(i){if(i instanceof o.onnx.TypeProto.Tensor)return i;var d=new o.onnx.TypeProto.Tensor;if(i.elemType!=null&&(d.elemType=0|i.elemType),i.shape!=null){if(typeof i.shape!="object")throw TypeError(".onnx.TypeProto.Tensor.shape: object expected");d.shape=o.onnx.TensorShapeProto.fromObject(i.shape)}return d},r.toObject=function(i,d){d||(d={});var g={};return d.defaults&&(g.elemType=0,g.shape=null),i.elemType!=null&&i.hasOwnProperty("elemType")&&(g.elemType=i.elemType),i.shape!=null&&i.hasOwnProperty("shape")&&(g.shape=o.onnx.TensorShapeProto.toObject(i.shape,d)),g},r.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},r}(),t}(),p.OperatorSetIdProto=function(){function t(e){if(e)for(var r=Object.keys(e),i=0;i>>3){case 1:d.domain=e.string();break;case 2:d.version=e.int64();break;default:e.skipType(7&g)}}return d},t.decodeDelimited=function(e){return e instanceof h||(e=new h(e)),this.decode(e,e.uint32())},t.verify=function(e){return typeof e!="object"||e===null?"object expected":e.domain!=null&&e.hasOwnProperty("domain")&&!l.isString(e.domain)?"domain: string expected":e.version!=null&&e.hasOwnProperty("version")&&!(l.isInteger(e.version)||e.version&&l.isInteger(e.version.low)&&l.isInteger(e.version.high))?"version: integer|Long expected":null},t.fromObject=function(e){if(e instanceof o.onnx.OperatorSetIdProto)return e;var r=new o.onnx.OperatorSetIdProto;return e.domain!=null&&(r.domain=String(e.domain)),e.version!=null&&(l.Long?(r.version=l.Long.fromValue(e.version)).unsigned=!1:typeof e.version=="string"?r.version=parseInt(e.version,10):typeof e.version=="number"?r.version=e.version:typeof e.version=="object"&&(r.version=new l.LongBits(e.version.low>>>0,e.version.high>>>0).toNumber())),r},t.toObject=function(e,r){r||(r={});var i={};if(r.defaults)if(i.domain="",l.Long){var d=new l.Long(0,0,!1);i.version=r.longs===String?d.toString():r.longs===Number?d.toNumber():d}else i.version=r.longs===String?"0":0;return e.domain!=null&&e.hasOwnProperty("domain")&&(i.domain=e.domain),e.version!=null&&e.hasOwnProperty("version")&&(typeof e.version=="number"?i.version=r.longs===String?String(e.version):e.version:i.version=r.longs===String?l.Long.prototype.toString.call(e.version):r.longs===Number?new l.LongBits(e.version.low>>>0,e.version.high>>>0).toNumber():e.version),i},t.prototype.toJSON=function(){return this.constructor.toObject(this,s.util.toJSONOptions)},t}(),p),y.exports=o},2100:(y,n,a)=>{y.exports=a(9482)},9482:(y,n,a)=>{var u=n;function c(){u.util._configure(),u.Writer._configure(u.BufferWriter),u.Reader._configure(u.BufferReader)}u.build="minimal",u.Writer=a(1173),u.BufferWriter=a(3155),u.Reader=a(1408),u.BufferReader=a(593),u.util=a(9693),u.rpc=a(5994),u.roots=a(5054),u.configure=c,c()},1408:(y,n,a)=>{y.exports=f;var u,c=a(9693),p=c.LongBits,s=c.utf8;function h(d,g){return 
RangeError("index out of range: "+d.pos+" + "+(g||1)+" > "+d.len)}function f(d){this.buf=d,this.pos=0,this.len=d.length}var l,o=typeof Uint8Array<"u"?function(d){if(d instanceof Uint8Array||Array.isArray(d))return new f(d);throw Error("illegal buffer")}:function(d){if(Array.isArray(d))return new f(d);throw Error("illegal buffer")},t=function(){return c.Buffer?function(d){return(f.create=function(g){return c.Buffer.isBuffer(g)?new u(g):o(g)})(d)}:o};function e(){var d=new p(0,0),g=0;if(!(this.len-this.pos>4)){for(;g<3;++g){if(this.pos>=this.len)throw h(this);if(d.lo=(d.lo|(127&this.buf[this.pos])<<7*g)>>>0,this.buf[this.pos++]<128)return d}return d.lo=(d.lo|(127&this.buf[this.pos++])<<7*g)>>>0,d}for(;g<4;++g)if(d.lo=(d.lo|(127&this.buf[this.pos])<<7*g)>>>0,this.buf[this.pos++]<128)return d;if(d.lo=(d.lo|(127&this.buf[this.pos])<<28)>>>0,d.hi=(d.hi|(127&this.buf[this.pos])>>4)>>>0,this.buf[this.pos++]<128)return d;if(g=0,this.len-this.pos>4){for(;g<5;++g)if(d.hi=(d.hi|(127&this.buf[this.pos])<<7*g+3)>>>0,this.buf[this.pos++]<128)return d}else for(;g<5;++g){if(this.pos>=this.len)throw h(this);if(d.hi=(d.hi|(127&this.buf[this.pos])<<7*g+3)>>>0,this.buf[this.pos++]<128)return d}throw Error("invalid varint encoding")}function r(d,g){return(d[g-4]|d[g-3]<<8|d[g-2]<<16|d[g-1]<<24)>>>0}function i(){if(this.pos+8>this.len)throw h(this,8);return new p(r(this.buf,this.pos+=4),r(this.buf,this.pos+=4))}f.create=t(),f.prototype._slice=c.Array.prototype.subarray||c.Array.prototype.slice,f.prototype.uint32=(l=4294967295,function(){if(l=(127&this.buf[this.pos])>>>0,this.buf[this.pos++]<128||(l=(l|(127&this.buf[this.pos])<<7)>>>0,this.buf[this.pos++]<128)||(l=(l|(127&this.buf[this.pos])<<14)>>>0,this.buf[this.pos++]<128)||(l=(l|(127&this.buf[this.pos])<<21)>>>0,this.buf[this.pos++]<128)||(l=(l|(15&this.buf[this.pos])<<28)>>>0,this.buf[this.pos++]<128))return l;if((this.pos+=5)>this.len)throw this.pos=this.len,h(this,10);return l}),f.prototype.int32=function(){return 0|this.uint32()},f.prototype.sint32=function(){var d=this.uint32();return d>>>1^-(1&d)|0},f.prototype.bool=function(){return this.uint32()!==0},f.prototype.fixed32=function(){if(this.pos+4>this.len)throw h(this,4);return r(this.buf,this.pos+=4)},f.prototype.sfixed32=function(){if(this.pos+4>this.len)throw h(this,4);return 0|r(this.buf,this.pos+=4)},f.prototype.float=function(){if(this.pos+4>this.len)throw h(this,4);var d=c.float.readFloatLE(this.buf,this.pos);return this.pos+=4,d},f.prototype.double=function(){if(this.pos+8>this.len)throw h(this,4);var d=c.float.readDoubleLE(this.buf,this.pos);return this.pos+=8,d},f.prototype.bytes=function(){var d=this.uint32(),g=this.pos,m=this.pos+d;if(m>this.len)throw h(this,d);return this.pos+=d,Array.isArray(this.buf)?this.buf.slice(g,m):g===m?new this.buf.constructor(0):this._slice.call(this.buf,g,m)},f.prototype.string=function(){var d=this.bytes();return s.read(d,0,d.length)},f.prototype.skip=function(d){if(typeof d=="number"){if(this.pos+d>this.len)throw h(this,d);this.pos+=d}else do if(this.pos>=this.len)throw h(this);while(128&this.buf[this.pos++]);return this},f.prototype.skipType=function(d){switch(d){case 0:this.skip();break;case 1:this.skip(8);break;case 2:this.skip(this.uint32());break;case 3:for(;(d=7&this.uint32())!=4;)this.skipType(d);break;case 5:this.skip(4);break;default:throw Error("invalid wire type "+d+" at offset "+this.pos)}return this},f._configure=function(d){u=d,f.create=t(),u._configure();var g=c.Long?"toLong":"toNumber";c.merge(f.prototype,{int64:function(){return 
e.call(this)[g](!1)},uint64:function(){return e.call(this)[g](!0)},sint64:function(){return e.call(this).zzDecode()[g](!1)},fixed64:function(){return i.call(this)[g](!0)},sfixed64:function(){return i.call(this)[g](!1)}})}},593:(y,n,a)=>{y.exports=p;var u=a(1408);(p.prototype=Object.create(u.prototype)).constructor=p;var c=a(9693);function p(s){u.call(this,s)}p._configure=function(){c.Buffer&&(p.prototype._slice=c.Buffer.prototype.slice)},p.prototype.string=function(){var s=this.uint32();return this.buf.utf8Slice?this.buf.utf8Slice(this.pos,this.pos=Math.min(this.pos+s,this.len)):this.buf.toString("utf-8",this.pos,this.pos=Math.min(this.pos+s,this.len))},p._configure()},5054:y=>{y.exports={}},5994:(y,n,a)=>{n.Service=a(7948)},7948:(y,n,a)=>{y.exports=c;var u=a(9693);function c(p,s,h){if(typeof p!="function")throw TypeError("rpcImpl must be a function");u.EventEmitter.call(this),this.rpcImpl=p,this.requestDelimited=!!s,this.responseDelimited=!!h}(c.prototype=Object.create(u.EventEmitter.prototype)).constructor=c,c.prototype.rpcCall=function p(s,h,f,l,o){if(!l)throw TypeError("request must be specified");var t=this;if(!o)return u.asPromise(p,t,s,h,f,l);if(t.rpcImpl)try{return t.rpcImpl(s,h[t.requestDelimited?"encodeDelimited":"encode"](l).finish(),function(e,r){if(e)return t.emit("error",e,s),o(e);if(r!==null){if(!(r instanceof f))try{r=f[t.responseDelimited?"decodeDelimited":"decode"](r)}catch(i){return t.emit("error",i,s),o(i)}return t.emit("data",r,s),o(null,r)}t.end(!0)})}catch(e){return t.emit("error",e,s),void setTimeout(function(){o(e)},0)}else setTimeout(function(){o(Error("already ended"))},0)},c.prototype.end=function(p){return this.rpcImpl&&(p||this.rpcImpl(null,null,null),this.rpcImpl=null,this.emit("end").off()),this}},1945:(y,n,a)=>{y.exports=c;var u=a(9693);function c(f,l){this.lo=f>>>0,this.hi=l>>>0}var p=c.zero=new c(0,0);p.toNumber=function(){return 0},p.zzEncode=p.zzDecode=function(){return this},p.length=function(){return 1};var s=c.zeroHash="\0\0\0\0\0\0\0\0";c.fromNumber=function(f){if(f===0)return p;var l=f<0;l&&(f=-f);var o=f>>>0,t=(f-o)/4294967296>>>0;return l&&(t=~t>>>0,o=~o>>>0,++o>4294967295&&(o=0,++t>4294967295&&(t=0))),new c(o,t)},c.from=function(f){if(typeof f=="number")return c.fromNumber(f);if(u.isString(f)){if(!u.Long)return c.fromNumber(parseInt(f,10));f=u.Long.fromString(f)}return f.low||f.high?new c(f.low>>>0,f.high>>>0):p},c.prototype.toNumber=function(f){if(!f&&this.hi>>>31){var l=1+~this.lo>>>0,o=~this.hi>>>0;return l||(o=o+1>>>0),-(l+4294967296*o)}return this.lo+4294967296*this.hi},c.prototype.toLong=function(f){return u.Long?new u.Long(0|this.lo,0|this.hi,!!f):{low:0|this.lo,high:0|this.hi,unsigned:!!f}};var h=String.prototype.charCodeAt;c.fromHash=function(f){return f===s?p:new c((h.call(f,0)|h.call(f,1)<<8|h.call(f,2)<<16|h.call(f,3)<<24)>>>0,(h.call(f,4)|h.call(f,5)<<8|h.call(f,6)<<16|h.call(f,7)<<24)>>>0)},c.prototype.toHash=function(){return String.fromCharCode(255&this.lo,this.lo>>>8&255,this.lo>>>16&255,this.lo>>>24,255&this.hi,this.hi>>>8&255,this.hi>>>16&255,this.hi>>>24)},c.prototype.zzEncode=function(){var f=this.hi>>31;return this.hi=((this.hi<<1|this.lo>>>31)^f)>>>0,this.lo=(this.lo<<1^f)>>>0,this},c.prototype.zzDecode=function(){var f=-(1&this.lo);return this.lo=((this.lo>>>1|this.hi<<31)^f)>>>0,this.hi=(this.hi>>>1^f)>>>0,this},c.prototype.length=function(){var f=this.lo,l=(this.lo>>>28|this.hi<<4)>>>0,o=this.hi>>>24;return o===0?l===0?f<16384?f<128?1:2:f<2097152?3:4:l<16384?l<128?5:6:l<2097152?7:8:o<128?9:10}},9693:function(y,n,a){var 
u=n;function c(s,h,f){for(var l=Object.keys(h),o=0;o0)},u.Buffer=function(){try{var s=u.inquire("buffer").Buffer;return s.prototype.utf8Write?s:null}catch{return null}}(),u._Buffer_from=null,u._Buffer_allocUnsafe=null,u.newBuffer=function(s){return typeof s=="number"?u.Buffer?u._Buffer_allocUnsafe(s):new u.Array(s):u.Buffer?u._Buffer_from(s):typeof Uint8Array>"u"?s:new Uint8Array(s)},u.Array=typeof Uint8Array<"u"?Uint8Array:Array,u.Long=u.global.dcodeIO&&u.global.dcodeIO.Long||u.global.Long||u.inquire("long"),u.key2Re=/^true|false|0|1$/,u.key32Re=/^-?(?:0|[1-9][0-9]*)$/,u.key64Re=/^(?:[\\x00-\\xff]{8}|-?(?:0|[1-9][0-9]*))$/,u.longToHash=function(s){return s?u.LongBits.from(s).toHash():u.LongBits.zeroHash},u.longFromHash=function(s,h){var f=u.LongBits.fromHash(s);return u.Long?u.Long.fromBits(f.lo,f.hi,h):f.toNumber(!!h)},u.merge=c,u.lcFirst=function(s){return s.charAt(0).toLowerCase()+s.substring(1)},u.newError=p,u.ProtocolError=p("ProtocolError"),u.oneOfGetter=function(s){for(var h={},f=0;f-1;--o)if(h[l[o]]===1&&this[l[o]]!==void 0&&this[l[o]]!==null)return l[o]}},u.oneOfSetter=function(s){return function(h){for(var f=0;f{y.exports=t;var u,c=a(9693),p=c.LongBits,s=c.base64,h=c.utf8;function f(b,_,v){this.fn=b,this.len=_,this.next=void 0,this.val=v}function l(){}function o(b){this.head=b.head,this.tail=b.tail,this.len=b.len,this.next=b.states}function t(){this.len=0,this.head=new f(l,0,0),this.tail=this.head,this.states=null}var e=function(){return c.Buffer?function(){return(t.create=function(){return new u})()}:function(){return new t}};function r(b,_,v){_[v]=255&b}function i(b,_){this.len=b,this.next=void 0,this.val=_}function d(b,_,v){for(;b.hi;)_[v++]=127&b.lo|128,b.lo=(b.lo>>>7|b.hi<<25)>>>0,b.hi>>>=7;for(;b.lo>127;)_[v++]=127&b.lo|128,b.lo=b.lo>>>7;_[v++]=b.lo}function g(b,_,v){_[v]=255&b,_[v+1]=b>>>8&255,_[v+2]=b>>>16&255,_[v+3]=b>>>24}t.create=e(),t.alloc=function(b){return new c.Array(b)},c.Array!==Array&&(t.alloc=c.pool(t.alloc,c.Array.prototype.subarray)),t.prototype._push=function(b,_,v){return this.tail=this.tail.next=new f(b,_,v),this.len+=_,this},i.prototype=Object.create(f.prototype),i.prototype.fn=function(b,_,v){for(;b>127;)_[v++]=127&b|128,b>>>=7;_[v]=b},t.prototype.uint32=function(b){return this.len+=(this.tail=this.tail.next=new i((b>>>=0)<128?1:b<16384?2:b<2097152?3:b<268435456?4:5,b)).len,this},t.prototype.int32=function(b){return b<0?this._push(d,10,p.fromNumber(b)):this.uint32(b)},t.prototype.sint32=function(b){return this.uint32((b<<1^b>>31)>>>0)},t.prototype.uint64=function(b){var _=p.from(b);return this._push(d,_.length(),_)},t.prototype.int64=t.prototype.uint64,t.prototype.sint64=function(b){var _=p.from(b).zzEncode();return this._push(d,_.length(),_)},t.prototype.bool=function(b){return this._push(r,1,b?1:0)},t.prototype.fixed32=function(b){return this._push(g,4,b>>>0)},t.prototype.sfixed32=t.prototype.fixed32,t.prototype.fixed64=function(b){var _=p.from(b);return this._push(g,4,_.lo)._push(g,4,_.hi)},t.prototype.sfixed64=t.prototype.fixed64,t.prototype.float=function(b){return this._push(c.float.writeFloatLE,4,b)},t.prototype.double=function(b){return this._push(c.float.writeDoubleLE,8,b)};var m=c.Array.prototype.set?function(b,_,v){_.set(b,v)}:function(b,_,v){for(var w=0;w>>0;if(!_)return this._push(r,1,0);if(c.isString(b)){var v=t.alloc(_=s.length(b));s.decode(b,v,0),b=v}return this.uint32(_)._push(m,_,b)},t.prototype.string=function(b){var _=h.length(b);return _?this.uint32(_)._push(h.write,_,b):this._push(r,1,0)},t.prototype.fork=function(){return 
this.states=new o(this),this.head=this.tail=new f(l,0,0),this.len=0,this},t.prototype.reset=function(){return this.states?(this.head=this.states.head,this.tail=this.states.tail,this.len=this.states.len,this.states=this.states.next):(this.head=this.tail=new f(l,0,0),this.len=0),this},t.prototype.ldelim=function(){var b=this.head,_=this.tail,v=this.len;return this.reset().uint32(v),v&&(this.tail.next=b.next,this.tail=_,this.len+=v),this},t.prototype.finish=function(){for(var b=this.head.next,_=this.constructor.alloc(this.len),v=0;b;)b.fn(b.val,_,v),v+=b.len,b=b.next;return _},t._configure=function(b){u=b,t.create=e(),u._configure()}},3155:(y,n,a)=>{y.exports=p;var u=a(1173);(p.prototype=Object.create(u.prototype)).constructor=p;var c=a(9693);function p(){u.call(this)}function s(h,f,l){h.length<40?c.utf8.write(h,f,l):f.utf8Write?f.utf8Write(h,l):f.write(h,l)}p._configure=function(){p.alloc=c._Buffer_allocUnsafe,p.writeBytesBuffer=c.Buffer&&c.Buffer.prototype instanceof Uint8Array&&c.Buffer.prototype.set.name==="set"?function(h,f,l){f.set(h,l)}:function(h,f,l){if(h.copy)h.copy(f,l,0,h.length);else for(var o=0;o>>0;return this.uint32(f),f&&this._push(p.writeBytesBuffer,f,h),this},p.prototype.string=function(h){var f=c.Buffer.byteLength(h);return this.uint32(f),f&&this._push(s,f,h),this},p._configure()},7714:(y,n,a)=>{n.R=void 0;const u=a(6919),c=a(7448);n.R=new class{async init(){}async createSessionHandler(p,s){const h=new u.Session(s);return await h.loadModel(p),new c.OnnxjsSessionHandler(h)}}},4200:(y,n,a)=>{n.c8=n.rX=void 0;const u=a(1670),c=a(5381),p=a(2157),s=a(2306);n.rX=()=>{if((typeof u.env.wasm.initTimeout!="number"||u.env.wasm.initTimeout<0)&&(u.env.wasm.initTimeout=0),typeof u.env.wasm.simd!="boolean"&&(u.env.wasm.simd=!0),typeof u.env.wasm.proxy!="boolean"&&(u.env.wasm.proxy=!1),typeof u.env.wasm.numThreads!="number"||!Number.isInteger(u.env.wasm.numThreads)||u.env.wasm.numThreads<=0){const h=typeof navigator>"u"?(0,c.cpus)().length:navigator.hardwareConcurrency;u.env.wasm.numThreads=Math.min(4,Math.ceil((h||1)/2))}},n.c8=new class{async init(){(0,n.rX)(),await(0,p.initWasm)()}async createSessionHandler(h,f){const l=new s.OnnxruntimeWebAssemblySessionHandler;return await l.loadModel(h,f),Promise.resolve(l)}}},6018:function(y,n,a){var u=this&&this.__createBinding||(Object.create?function(s,h,f,l){l===void 0&&(l=f);var o=Object.getOwnPropertyDescriptor(h,f);o&&!("get"in o?!h.__esModule:o.writable||o.configurable)||(o={enumerable:!0,get:function(){return h[f]}}),Object.defineProperty(s,l,o)}:function(s,h,f,l){l===void 0&&(l=f),s[l]=h[f]}),c=this&&this.__exportStar||function(s,h){for(var f in s)f==="default"||Object.prototype.hasOwnProperty.call(h,f)||u(h,s,f)};Object.defineProperty(n,"__esModule",{value:!0}),c(a(1670),n);const p=a(1670);{const s=a(7714).R;(0,p.registerBackend)("webgl",s,-10)}{const s=a(4200).c8;(0,p.registerBackend)("cpu",s,10),(0,p.registerBackend)("wasm",s,10),(0,p.registerBackend)("xnnpack",s,9)}},246:(y,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createAttributeWithCacheKey=void 0;class a{constructor(c){Object.assign(this,c)}get cacheKey(){return this._cacheKey||(this._cacheKey=Object.getOwnPropertyNames(this).sort().map(c=>`${this[c]}`).join(";")),this._cacheKey}}n.createAttributeWithCacheKey=u=>new a(u)},7778:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Attribute=void 0;const u=a(1446),c=a(9395),p=a(9162),s=a(2517);var h=c.onnxruntime.experimental.fbs;class f{constructor(o){if(this._attributes=new Map,o!=null){for(const t of o)t 
instanceof u.onnx.AttributeProto?this._attributes.set(t.name,[f.getValue(t),f.getType(t)]):t instanceof h.Attribute&&this._attributes.set(t.name(),[f.getValue(t),f.getType(t)]);if(this._attributes.sizep.Tensor.fromProto(r));if(o instanceof h.Attribute)return e.map(r=>p.Tensor.fromOrtTensor(r))}if(t===u.onnx.AttributeProto.AttributeType.STRING&&o instanceof u.onnx.AttributeProto){const r=e;return(0,s.decodeUtf8String)(r)}return t===u.onnx.AttributeProto.AttributeType.STRINGS&&o instanceof u.onnx.AttributeProto?e.map(s.decodeUtf8String):e}static getValueNoCheck(o){return o instanceof u.onnx.AttributeProto?this.getValueNoCheckFromOnnxFormat(o):this.getValueNoCheckFromOrtFormat(o)}static getValueNoCheckFromOnnxFormat(o){switch(o.type){case u.onnx.AttributeProto.AttributeType.FLOAT:return o.f;case u.onnx.AttributeProto.AttributeType.INT:return o.i;case u.onnx.AttributeProto.AttributeType.STRING:return o.s;case u.onnx.AttributeProto.AttributeType.TENSOR:return o.t;case u.onnx.AttributeProto.AttributeType.GRAPH:return o.g;case u.onnx.AttributeProto.AttributeType.FLOATS:return o.floats;case u.onnx.AttributeProto.AttributeType.INTS:return o.ints;case u.onnx.AttributeProto.AttributeType.STRINGS:return o.strings;case u.onnx.AttributeProto.AttributeType.TENSORS:return o.tensors;case u.onnx.AttributeProto.AttributeType.GRAPHS:return o.graphs;default:throw new Error(`unsupported attribute type: ${u.onnx.AttributeProto.AttributeType[o.type]}`)}}static getValueNoCheckFromOrtFormat(o){switch(o.type()){case h.AttributeType.FLOAT:return o.f();case h.AttributeType.INT:return o.i();case h.AttributeType.STRING:return o.s();case h.AttributeType.TENSOR:return o.t();case h.AttributeType.GRAPH:return o.g();case h.AttributeType.FLOATS:return o.floatsArray();case h.AttributeType.INTS:{const t=[];for(let e=0;e{Object.defineProperty(n,"__esModule",{value:!0}),n.resolveBackend=n.backend=void 0;const u=a(5038),c=new Map;async function p(s){const h=n.backend;if(h[s]!==void 0&&function(f){const l=f;return"initialize"in l&&typeof l.initialize=="function"&&"createSessionHandler"in l&&typeof l.createSessionHandler=="function"&&"dispose"in l&&typeof l.dispose=="function"}(h[s])){const f=h[s];let l=f.initialize();if(typeof l=="object"&&"then"in l&&(l=await l),l)return c.set(s,f),f}}n.backend={webgl:new u.WebGLBackend},n.resolveBackend=async function s(h){if(!h)return s(["webgl"]);{const f=typeof h=="string"?[h]:h;for(const l of f){const o=c.get(l);if(o)return o;const t=await p(l);if(t)return t}}throw new Error("no available backend to use")}},5038:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLBackend=void 0;const u=a(1670),c=a(6231),p=a(6416),s=a(7305);n.WebGLBackend=class{get contextId(){return u.env.webgl.contextId}set contextId(h){u.env.webgl.contextId=h}get matmulMaxBatchSize(){return u.env.webgl.matmulMaxBatchSize}set matmulMaxBatchSize(h){u.env.webgl.matmulMaxBatchSize=h}get textureCacheMode(){return u.env.webgl.textureCacheMode}set textureCacheMode(h){u.env.webgl.textureCacheMode=h}get pack(){return u.env.webgl.pack}set pack(h){u.env.webgl.pack=h}get async(){return u.env.webgl.async}set async(h){u.env.webgl.async=h}initialize(){try{return this.glContext=(0,s.createWebGLContext)(this.contextId),typeof this.matmulMaxBatchSize!="number"&&(this.matmulMaxBatchSize=16),typeof this.textureCacheMode!="string"&&(this.textureCacheMode="full"),typeof this.pack!="boolean"&&(this.pack=!1),typeof this.async!="boolean"&&(this.async=!1),c.Logger.setWithEnv(u.env),c.Logger.verbose("WebGLBackend",`Created WebGLContext: 
${typeof this.glContext} with matmulMaxBatchSize: ${this.matmulMaxBatchSize}; textureCacheMode: ${this.textureCacheMode}; pack: ${this.pack}; async: ${this.async}.`),!0}catch(h){return c.Logger.warning("WebGLBackend",`Unable to initialize WebGLBackend. ${h}`),!1}}createSessionHandler(h){return new p.WebGLSessionHandler(this,h)}dispose(){this.glContext.dispose()}}},5107:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.CoordsGlslLib=void 0;const u=a(2517),c=a(8520),p=a(5060),s=a(7859),h=a(9390);class f extends c.GlslLib{constructor(o){super(o)}getFunctions(){return Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},this.offsetToCoords()),this.coordsToOffset()),this.toVec()),this.valueFrom()),this.getCommonUtilFuncs()),this.getInputsSamplingSnippets()),this.getOutputSamplingSnippet())}getCustomTypes(){return{}}offsetToCoords(){return{offsetToCoords:new c.GlslLibRoutine(` - vec2 offsetToCoords(int offset, int width, int height) { - int t = offset / width; - int s = offset - t*width; - vec2 coords = (vec2(s,t) + vec2(0.5,0.5)) / vec2(width, height); - return coords; - } - `)}}coordsToOffset(){return{coordsToOffset:new c.GlslLibRoutine(` - int coordsToOffset(vec2 coords, int width, int height) { - float s = coords.s * float(width); - float t = coords.t * float(height); - int offset = int(t) * width + int(s); - return offset; - } - `)}}getOutputSamplingSnippet(){const o=this.context.outputTextureLayout;return o.isPacked?this.getPackedOutputSamplingSnippet(o):this.getUnpackedOutputSamplingSnippet(o)}getPackedOutputSamplingSnippet(o){const t=o.unpackedShape,e=[o.width,o.height],r={},i="getOutputCoords";switch(t.length){case 0:r[i]=this.getOutputScalarCoords();break;case 1:r[i]=this.getOutputPacked1DCoords(t,e);break;case 2:r[i]=this.getOutputPacked2DCoords(t,e);break;case 3:r[i]=this.getOutputPacked3DCoords(t,e);break;default:r[i]=this.getOutputPackedNDCoords(t,e)}const d=` - void setOutput(vec4 val) { - ${(0,p.getGlsl)(this.context.glContext.version).output} = val; - } - `;return r.floatTextureSetRGBA=new c.GlslLibRoutine(d),r}getUnpackedOutputSamplingSnippet(o){const t=o.unpackedShape,e=[o.width,o.height],r={},i="getOutputCoords";switch(t.length){case 0:r[i]=this.getOutputScalarCoords();break;case 1:r[i]=this.getOutputUnpacked1DCoords(t,e);break;case 2:r[i]=this.getOutputUnpacked2DCoords(t,e);break;case 3:r[i]=this.getOutputUnpacked3DCoords(t,e);break;case 4:r[i]=this.getOutputUnpacked4DCoords(t,e);break;case 5:r[i]=this.getOutputUnpacked5DCoords(t,e);break;case 6:r[i]=this.getOutputUnpacked6DCoords(t,e);break;default:throw new Error(`Unsupported output dimensionality: ${t.length}`)}const d=` - void setOutput(float val) { - ${(0,p.getGlsl)(this.context.glContext.version).output} = vec4(val, 0, 0, 0); - } - `;return r.floatTextureSetR=new c.GlslLibRoutine(d),r}getOutputScalarCoords(){return new c.GlslLibRoutine(` - int getOutputCoords() { - return 0; - } - `)}getOutputPacked1DCoords(o,t){const e=t;let r="";return e[0]===1?(r=` - int getOutputCoords() { - return 2 * int(TexCoords.y * ${e[1]}.0); - } - `,new c.GlslLibRoutine(r)):e[1]===1?(r=` - int getOutputCoords() { - return 2 * int(TexCoords.x * ${e[0]}.0); - } - `,new c.GlslLibRoutine(r)):(r=` - int getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${e[0]}, ${e[1]})); - return 2 * (resTexRC.y * ${e[0]} + resTexRC.x); - } - `,new c.GlslLibRoutine(r))}getOutputPacked2DCoords(o,t){let e="";if(u.ArrayUtil.arraysEqual(o,t))return e=` - ivec2 getOutputCoords() { - return 2 
* ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); - } - `,new c.GlslLibRoutine(e);const r=t,i=Math.ceil(o[1]/2);return e=` - ivec2 getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${r[0]}, ${r[1]})); - - int index = resTexRC.y * ${r[0]} + resTexRC.x; - - // reverse r and c order for packed texture - int r = imod(index, ${i}) * 2; - int c = 2 * (index / ${i}); - - return ivec2(r, c); - } - `,new c.GlslLibRoutine(e)}getOutputPacked3DCoords(o,t){const e=[t[0],t[1]],r=Math.ceil(o[2]/2),i=r*Math.ceil(o[1]/2),d=` - ivec3 getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${e[0]}, ${e[1]})); - int index = resTexRC.y * ${e[0]} + resTexRC.x; - - int b = index / ${i}; - index -= b * ${i}; - - // reverse r and c order for packed texture - int r = imod(index, ${r}) * 2; - int c = 2 * (index / ${r}); - - return ivec3(b, r, c); - } - `;return new c.GlslLibRoutine(d)}getOutputPackedNDCoords(o,t){const e=[t[0],t[1]],r=Math.ceil(o[o.length-1]/2),i=r*Math.ceil(o[o.length-2]/2);let d=i,g="",m="b, r, c";for(let _=2;_=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=` - ivec3 getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${t[0]}, ${t[1]})); - int index = resTexRC.y * ${t[0]} + resTexRC.x; - ${g} - return ivec3(r, c, d); - } - `,new c.GlslLibRoutine(e)}getOutputUnpacked4DCoords(o,t){let e="";const r=o.length;let i=null;r<2&&(i=[]),i=new Array(r-1),i[r-2]=o[r-1];for(let m=r-3;m>=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d","d2"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=` - ivec4 getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${t[0]}, ${t[1]})); - int index = resTexRC.y * ${t[0]} + resTexRC.x; - ${g} - return ivec4(r, c, d, d2); - } - `,new c.GlslLibRoutine(e)}getOutputUnpacked5DCoords(o,t){let e="";const r=o.length;let i=null;r<2&&(i=[]),i=new Array(r-1),i[r-2]=o[r-1];for(let m=r-3;m>=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d","d2","d3"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=` - ivec5 getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${t[0]}, ${t[1]})); - int index = resTexRC.y * ${t[0]} + resTexRC.x; - ${g} - return ivec5(r, c, d, d2, d3); - } - `,new c.GlslLibRoutine(e)}getOutputUnpacked6DCoords(o,t){let e="";const r=o.length;let i=null;r<2&&(i=[]),i=new Array(r-1),i[r-2]=o[r-1];for(let m=r-3;m>=0;--m)i[m]=i[m+1]*o[m+1];const d=["r","c","d","d2","d3","d4"],g=i.map((m,b)=>`int ${d[b]} = index / ${m}; ${b===i.length-1?`int ${d[b+1]} = index - ${d[b]} * ${m}`:`index -= ${d[b]} * ${m}`};`).join("");return e=` - ivec6 getOutputCoords() { - ivec2 resTexRC = ivec2(TexCoords.xy * - vec2(${t[0]}, ${t[1]})); - int index = resTexRC.y * ${t[0]} + resTexRC.x; - ${g} - return ivec6(r, c, d, d2, d3, d4); - } - `,new c.GlslLibRoutine(e)}getCommonUtilFuncs(){const o={};let t="uvFromFlat";o[t]=new c.GlslLibRoutine(` - vec2 uvFromFlat(int texNumR, int texNumC, int index) { - int texC = index / texNumR; - int texR = index - texC * texNumR; - // TODO: swap texR, texC order in following function so row is corresponding to u and column is corresponding to - // v. 
- return (vec2(texR, texC) + halfCR) / vec2(texNumR, texNumC); - } - `),t="packedUVfrom1D",o[t]=new c.GlslLibRoutine(` - vec2 packedUVfrom1D(int texNumR, int texNumC, int index) { - int texelIndex = index / 2; - int texR = texelIndex / texNumC; - int texC = texelIndex - texR * texNumC; - return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); - } - `),t="packedUVfrom2D",o[t]=new c.GlslLibRoutine(` - vec2 packedUVfrom2D(int texNumR, int texNumC, int texelsInLogicalRow, int row, int col) { - int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2); - int texR = texelIndex / texNumC; - int texC = texelIndex - texR * texNumC; - return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); - } - `),t="packedUVfrom3D",o[t]=new c.GlslLibRoutine(` - vec2 packedUVfrom3D(int texNumR, int texNumC, - int texelsInBatch, int texelsInLogicalRow, int b, - int row, int col) { - int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2); - int texR = index / texNumC; - int texC = index - texR * texNumC; - return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); - } - `),t="sampleTexture";const e=(0,p.getGlsl)(this.context.glContext.version);return o[t]=new c.GlslLibRoutine(` - float sampleTexture(sampler2D textureSampler, vec2 uv) { - return ${e.texture2D}(textureSampler, uv).r; - }`),o}getInputsSamplingSnippets(){const o={},t=this.context.outputTextureLayout;return this.context.programInfo.inputNames.forEach((e,r)=>{const i=this.context.inputTextureLayouts[r],d=(0,h.generateShaderFuncNameFromInputSamplerName)(e);i.isPacked?o[d]=this.getPackedSamplerFromInput(d,e,i):o[d]=this.getUnpackedSamplerFromInput(d,e,i);const g=(0,h.generateShaderFuncNameFromInputSamplerNameAtOutCoords)(e);i.unpackedShape.length<=t.unpackedShape.length&&(i.isPacked?o[g]=this.getPackedSamplerAtOutputCoords(g,i,t,e):o[g]=this.getUnpackedSamplerAtOutputCoords(g,i,t,e))}),o}getPackedSamplerAtOutputCoords(o,t,e,r){const i=t.unpackedShape,d=e.unpackedShape,g=r,m=(0,h.generateShaderFuncNameFromInputSamplerName)(g),b=i.length,_=d.length,v=u.BroadcastUtil.getBroadcastDims(i,d),w=(0,h.getCoordsDataType)(_),S=_-b;let A;const O=(0,h.getGlChannels)();A=b===0?"":_<2&&v.length>=1?"coords = 0;":v.map(F=>`coords.${O[F+S]} = 0;`).join(` -`);let x="";x=_<2&&b>0?"coords":i.map((F,H)=>`coords.${O[H+S]}`).join(", ");let I="return outputValue;";const N=u.ShapeUtil.size(i)===1,B=u.ShapeUtil.size(d)===1;if(b!==1||N||B){if(N&&!B)I=_===1?` - return vec4(outputValue.x, outputValue.x, 0., 0.); - `:` - return vec4(outputValue.x); - `;else if(v.length){const F=b-2,H=b-1;v.indexOf(F)>-1&&v.indexOf(H)>-1?I="return vec4(outputValue.x);":v.indexOf(F)>-1?I="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":v.indexOf(H)>-1&&(I="return vec4(outputValue.xx, outputValue.zz);")}}else I=` - return vec4(outputValue.xy, outputValue.xy); - `;const L=` - vec4 ${o}() { - ${w} coords = getOutputCoords(); - - int lastDim = coords.${O[_-1]}; - coords.${O[_-1]} = coords.${O[_-2]}; - coords.${O[_-2]} = lastDim; - - ${A} - vec4 outputValue = ${m}(${x}); - ${I} - } - `;return new c.GlslLibRoutine(L,["coordinates.getOutputCoords"])}getUnpackedSamplerAtOutputCoords(o,t,e,r){const i=[e.width,e.height],d=[t.width,t.height],g=t.unpackedShape.length,m=e.unpackedShape.length,b=t.unpackedShape,_=e.unpackedShape,v=(0,h.generateShaderFuncNameFromInputSamplerName)(r);if(g===m&&u.ArrayUtil.arraysEqual(d,i)){const B=` - float ${o}() { - return sampleTexture(${r}, TexCoords); - } - `;return new c.GlslLibRoutine(B,["coordinates.sampleTexture"])}const 
w=(0,h.getCoordsDataType)(m),S=u.BroadcastUtil.getBroadcastDims(b,_),A=m-g;let O;const x=(0,h.getGlChannels)();O=g===0?"":m<2&&S.length>=1?"coords = 0;":S.map(B=>`coords.${x[B+A]} = 0;`).join(` -`);let I="";I=m<2&&g>0?"coords":t.unpackedShape.map((B,L)=>`coords.${x[L+A]}`).join(", ");const N=` - float ${o}() { - ${w} coords = getOutputCoords(); - ${O} - return ${v}(${I}); - } - `;return new c.GlslLibRoutine(N,["coordinates.getOutputCoords"])}getPackedSamplerFromInput(o,t,e){switch(e.unpackedShape.length){case 0:return this.getPackedSamplerScalar(o,t);case 1:return this.getPackedSampler1D(o,t,e);case 2:return this.getPackedSampler2D(o,t,e);case 3:return this.getPackedSampler3D(o,t,e);default:return this.getPackedSamplerND(o,t,e)}}getUnpackedSamplerFromInput(o,t,e){const r=e.unpackedShape;switch(r.length){case 0:return this.getUnpackedSamplerScalar(o,t,e);case 1:return this.getUnpackedSampler1D(o,t,e);case 2:return this.getUnpackedSampler2D(o,t,e);case 3:return this.getUnpackedSampler3D(o,t,e);case 4:return this.getUnpackedSampler4D(o,t,e);case 5:return this.getUnpackedSampler5D(o,t,e);case 6:return this.getUnpackedSampler6D(o,t,e);default:throw new Error(`Unsupported dimension ${r.length}-D`)}}getPackedSamplerScalar(o,t){const e=` - vec4 ${o}() { - return ${(0,p.getGlsl)(this.context.glContext.version).texture2D}(${t}, halfCR); - } - `;return new c.GlslLibRoutine(e)}getPackedSampler1D(o,t,e){const r=[e.width,e.height],i=[r[1],r[0]],d=(0,p.getGlsl)(this.context.glContext.version),g=`vec4 ${o}(int index) { - vec2 uv = packedUVfrom1D( - ${i[0]}, ${i[1]}, index); - return ${d.texture2D}(${t}, uv); - }`;return new c.GlslLibRoutine(g,["coordinates.packedUVfrom1D"])}getPackedSampler2D(o,t,e){const r=e.unpackedShape,i=[e.width,e.height],d=(0,p.getGlsl)(this.context.glContext.version),g=i[0],m=i[1];if(i!=null&&u.ArrayUtil.arraysEqual(r,i)){const w=`vec4 ${o}(int row, int col) { - vec2 uv = (vec2(col, row) + halfCR) / vec2(${m}.0, ${g}.0); - return ${d.texture2D}(${t}, uv); - }`;return new c.GlslLibRoutine(w)}const b=i,_=Math.ceil(r[1]/2),v=`vec4 ${o}(int row, int col) { - vec2 uv = packedUVfrom2D(${b[1]}, ${b[0]}, ${_}, row, col); - return ${d.texture2D}(${t}, uv); - }`;return new c.GlslLibRoutine(v,["coordinates.packedUVfrom2D"])}getPackedSampler3D(o,t,e){const r=e.unpackedShape,i=[e.width,e.height],d=[i[0],i[1]],g=(0,p.getGlsl)(this.context.glContext.version);if(r[0]===1){const w=r.slice(1),S=[1,2],A=(0,h.squeezeInputShape)(r,w),O=["b","row","col"],x=JSON.parse(JSON.stringify(e));x.unpackedShape=A;const I=this.getPackedSamplerFromInput(o,t,x),N=`${I.routineBody} - vec4 ${o}(int b, int row, int col) { - return ${o}(${(0,h.getSqueezedParams)(O,S)}); - } `;return new c.GlslLibRoutine(N,I.dependencies)}const m=d[0],b=d[1],_=Math.ceil(r[2]/2),v=`vec4 ${o}(int b, int row, int col) { - vec2 uv = packedUVfrom3D( - ${b}, ${m}, ${_*Math.ceil(r[1]/2)}, ${_}, b, row, col); - return ${g.texture2D}(${t}, uv);}`;return new c.GlslLibRoutine(v,["coordinates.packedUVfrom3D"])}getPackedSamplerND(o,t,e){const r=e.unpackedShape,i=r.length,d=[e.width,e.height],g=(0,p.getGlsl)(this.context.glContext.version),m=[d[0],d[1]],b=m[1],_=m[0],v=Math.ceil(r[i-1]/2);let w=v*Math.ceil(r[i-2]/2),S="int b, int row, int col",A=`b * ${w} + (row / 2) * ${v} + (col / 2)`;for(let x=2;x{const r=this.context.inputTextureLayouts[e],i=(r.unpackedShape.length>0?r.unpackedShape:r.shape).length;let d=`_${t}`;o[d]=new 
c.GlslLibRoutine(this.getValueFromSingle(t,i,r.width,r.height,!1),[`shapeUtils.indicesToOffset${d}`,"coordinates.offsetToCoords","fragcolor.getColorAsFloat"]),d+="_T",o[d]=new c.GlslLibRoutine(this.getValueFromSingle(t,i,r.width,r.height,!0),[`shapeUtils.indicesToOffset${d}`,"coordinates.offsetToCoords","fragcolor.getColorAsFloat"])}),o}getValueFromSingle(o,t,e,r,i){let d=`_${o}`;return i&&(d+="_T"),` - float ${d}(int m[${t}]) { - int offset = indicesToOffset${d}(m); - vec2 coords = offsetToCoords(offset, ${e}, ${r}); - float value = getColorAsFloat(${(0,p.getGlsl)(this.context.glContext.version).texture2D}(${o}, coords)); - return value; - } - `}getPackedValueFrom(o,t,e,r,i){let d=`_${o}_Pack`;return i&&(d+="_T"),` - vec4 ${d}(int m[${t}]) { - int offset = indicesToOffset_${o}(m); - vec2 coords = offsetToCoords(offset, ${e}, ${r}); - return ${(0,p.getGlsl)(this.context.glContext.version).texture2D}(${o}, coords); - } - `}}n.CoordsGlslLib=f},8520:(y,n)=>{var a;Object.defineProperty(n,"__esModule",{value:!0}),n.TopologicalSortGlslRoutines=n.GlslLibRoutineNode=n.GlslLibRoutine=n.GlslLib=n.GlslContext=n.FunctionType=void 0,(a=n.FunctionType||(n.FunctionType={}))[a.ValueBased=0]="ValueBased",a[a.Positional=1]="Positional",n.GlslContext=class{constructor(u,c,p,s){this.glContext=u,this.programInfo=c,this.inputTextureLayouts=p,this.outputTextureLayout=s}},n.GlslLib=class{constructor(u){this.context=u}},n.GlslLibRoutine=class{constructor(u,c){this.routineBody=u,this.dependencies=c}},n.GlslLibRoutineNode=class{constructor(u,c,p){this.name=u,this.dependencies=p||[],c&&(this.routineBody=c)}addDependency(u){u&&this.dependencies.push(u)}},n.TopologicalSortGlslRoutines=class{static returnOrderedNodes(u){if(!u||u.length===0)return[];if(u.length===1)return u;const c=new Set,p=new Set,s=new Array;return this.createOrderedNodes(u,c,p,s),s}static createOrderedNodes(u,c,p,s){for(let h=0;h0)for(let f=0;f{Object.defineProperty(n,"__esModule",{value:!0}),n.EncodingGlslLib=void 0;const u=a(8520);class c extends u.GlslLib{constructor(s){super(s)}getFunctions(){return Object.assign(Object.assign({},this.encodeFloat32()),this.decodeFloat32())}getCustomTypes(){return{}}encodeFloat32(){return{encode:new u.GlslLibRoutine(`highp vec4 encode(highp float f) { - return vec4(f, 0.0, 0.0, 0.0); - } - `)}}decodeFloat32(){return{decode:new u.GlslLibRoutine(`highp float decode(highp vec4 rgba) { - return rgba.r; - } - `)}}encodeUint8(){const s=c.isLittleEndian()?"rgba.rgba=rgba.abgr;":"";return{encode:new u.GlslLibRoutine(` - highp vec4 encode(highp float f) { - highp float F = abs(f); - highp float Sign = step(0.0,-f); - highp float Exponent = floor(log2(F)); - highp float Mantissa = (exp2(- Exponent) * F); - Exponent = floor(log2(F) + 127.0) + floor(log2(Mantissa)); - highp vec4 rgba; - rgba[0] = 128.0 * Sign + floor(Exponent*exp2(-1.0)); - rgba[1] = 128.0 * mod(Exponent,2.0) + mod(floor(Mantissa*128.0),128.0); - rgba[2] = floor(mod(floor(Mantissa*exp2(23.0 -8.0)),exp2(8.0))); - rgba[3] = floor(exp2(23.0)*mod(Mantissa,exp2(-15.0))); - ${s} - rgba = rgba / 255.0; // values need to be normalized to [0,1] - return rgba; - } - `)}}decodeUint8(){const s=c.isLittleEndian()?"rgba.rgba=rgba.abgr;":"";return{decode:new u.GlslLibRoutine(` - highp float decode(highp vec4 rgba) { - rgba = rgba * 255.0; // values need to be de-normalized from [0,1] to [0,255] - ${s} - highp float Sign = 1.0 - step(128.0,rgba[0])*2.0; - highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0; - highp float Mantissa = 
mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000); - highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 )); - return Result; - } - `)}}static isLittleEndian(){const s=new ArrayBuffer(4),h=new Uint32Array(s),f=new Uint8Array(s);if(h[0]=3735928559,f[0]===239)return!0;if(f[0]===222)return!1;throw new Error("unknown endianness")}}n.EncodingGlslLib=c},9894:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.FragColorGlslLib=void 0;const u=a(8520),c=a(5060);class p extends u.GlslLib{constructor(h){super(h)}getFunctions(){return Object.assign(Object.assign({},this.setFragColor()),this.getColorAsFloat())}getCustomTypes(){return{}}setFragColor(){const h=(0,c.getGlsl)(this.context.glContext.version);return{setFragColor:new u.GlslLibRoutine(` - void setFragColor(float value) { - ${h.output} = encode(value); - } - `,["encoding.encode"])}}getColorAsFloat(){return{getColorAsFloat:new u.GlslLibRoutine(` - float getColorAsFloat(vec4 color) { - return decode(color); - } - `,["encoding.decode"])}}}n.FragColorGlslLib=p},2848:(y,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.replaceInlines=void 0;const a=/@inline[\s\n\r]+(\w+)[\s\n\r]+([0-9a-zA-Z_]+)\s*\(([^)]*)\)\s*{(([^}]|[\n\r])*)}/gm;n.replaceInlines=function(u){const c={};let p;for(;(p=a.exec(u))!==null;){const s=p[3].split(",").map(h=>{const f=h.trim().split(" ");return f&&f.length===2?{type:f[0],name:f[1]}:null}).filter(h=>h!==null);c[p[2]]={params:s,body:p[4]}}for(const s in c){const h="(\\w+)?\\s+([_0-9a-zA-Z]+)\\s+=\\s+__FUNC__\\((.*)\\)\\s*;".replace("__FUNC__",s),f=new RegExp(h,"gm");for(;(p=f.exec(u))!==null;){const l=p[1],o=p[2],t=p[3].split(","),e=l?`${l} ${o};`:"";let r=c[s].body,i="";c[s].params.forEach((g,m)=>{g&&(i+=`${g.type} ${g.name} = ${t[m]}; -`)}),r=`${i} - ${r}`,r=r.replace("return",`${o} = `);const d=` - ${e} - { - ${r} - } - `;u=u.replace(p[0],d)}}return u.replace(a,"")}},8879:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.GlslPreprocessor=void 0;const u=a(8520),c=a(2848),p=a(5483),s=a(5060);n.GlslPreprocessor=class{constructor(h,f,l,o){this.libs={},this.glslLibRoutineDependencyGraph={},this.context=new u.GlslContext(h,f,l,o),Object.keys(p.glslRegistry).forEach(e=>{const r=new p.glslRegistry[e](this.context);this.libs[e]=r});const t=this.glslLibRoutineDependencyGraph;for(const e in this.libs){const r=this.libs[e].getFunctions();for(const i in r){const d=e+"."+i;let g;t[d]?(g=t[d],g.routineBody=r[i].routineBody):(g=new u.GlslLibRoutineNode(d,r[i].routineBody),t[d]=g);const m=r[i].dependencies;if(m)for(let b=0;b{const o=l.split(".")[1];h.indexOf(o)!==-1&&f.push(this.glslLibRoutineDependencyGraph[l])}),u.TopologicalSortGlslRoutines.returnOrderedNodes(f)}getUniforms(h,f){const l=[];if(h)for(const o of h)l.push(`uniform sampler2D ${o};`);if(f)for(const o of f)l.push(`uniform ${o.type} ${o.name}${o.arrayLength?`[${o.arrayLength}]`:""};`);return l.join(` -`)}}},5483:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.glslRegistry=void 0;const u=a(5107),c=a(7341),p=a(9894),s=a(2655),h=a(3891);n.glslRegistry={encoding:c.EncodingGlslLib,fragcolor:p.FragColorGlslLib,vec:h.VecGlslLib,shapeUtils:s.ShapeUtilsGlslLib,coordinates:u.CoordsGlslLib}},2655:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.ShapeUtilsGlslLib=void 0;const u=a(8520);class c extends u.GlslLib{constructor(s){super(s)}getFunctions(){return 
Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},this.bcastIndex()),this.bcastMatmulIndex()),this.offsetToIndices()),this.indicesToOffset()),this.incrementIndices())}getCustomTypes(){return{}}bcastIndex(){const s=this.context.outputTextureLayout.shape.length,h={};return this.context.programInfo.inputNames.forEach((f,l)=>{const o=this.context.inputTextureLayouts[l].unpackedShape;if(o.length<=s){const t=o.length,e=s-t,r=`bcastIndices_${f}`;let i="";for(let g=0;g{const o=this.context.inputTextureLayouts[l].shape;if(!(o.length<2||o.length>s)){const t=o.length,e=s-t,r=`bcastMatmulIndices_${f}`;let i="";for(let g=0;g{const l=this.context.inputTextureLayouts[f].shape,o=this.context.inputTextureLayouts[f].strides,t=l.length;let e=`indicesToOffset_${h}`;s[e]=new u.GlslLibRoutine(c.indexToOffsetSingle(e,t,o)),e=`indicesToOffset_${h}_T`,s[e]=new u.GlslLibRoutine(c.indexToOffsetSingle(e,t,o.slice().reverse()))}),s}static indexToOffsetSingle(s,h,f){let l="";for(let o=h-1;o>=0;--o)l+=` - offset += indices[${o}] * ${f[o]}; - `;return` - int ${s}(int indices[${h}]) { - int offset = 0; - ${l} - return offset; - } - `}offsetToIndices(){const s={};return this.context.programInfo.inputNames.forEach((h,f)=>{const l=this.context.inputTextureLayouts[f].shape,o=this.context.inputTextureLayouts[f].strides,t=l.length;let e=`offsetToIndices_${h}`;s[e]=new u.GlslLibRoutine(c.offsetToIndicesSingle(e,t,o)),e=`offsetToIndices_${h}_T`,s[e]=new u.GlslLibRoutine(c.offsetToIndicesSingle(e,t,o.slice().reverse()))}),s}static offsetToIndicesSingle(s,h,f){const l=[];for(let o=0;o{const l=this.context.inputTextureLayouts[f].shape,o=l.length,t=`incrementIndices_${h}`;let e="";for(let i=0;i= 0; --i) { - if(i > axis) continue; - indices[i] += 1; - if(indices[i] < shape[i]) { - break; - } - indices[i] = 0; - } - } - `;s[t]=new u.GlslLibRoutine(r)}),s}}n.ShapeUtilsGlslLib=c},5060:(y,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.getDefaultFragShaderMain=n.getFragShaderPreamble=n.getVertexShaderSource=n.getGlsl=void 0;const a={version:"",attribute:"attribute",varyingVertex:"varying",varyingFrag:"varying",texture2D:"texture2D",output:"gl_FragColor",outputDeclaration:""},u={version:"#version 300 es",attribute:"in",varyingVertex:"out",varyingFrag:"in",texture2D:"texture",output:"outputColor",outputDeclaration:"out vec4 outputColor;"};function c(p){return p===1?a:u}n.getGlsl=c,n.getVertexShaderSource=function(p){const s=c(p);return`${s.version} - precision highp float; - ${s.attribute} vec3 position; - ${s.attribute} vec2 textureCoord; - - ${s.varyingVertex} vec2 TexCoords; - - void main() - { - gl_Position = vec4(position, 1.0); - TexCoords = textureCoord; - }`},n.getFragShaderPreamble=function(p){const s=c(p);return`${s.version} - precision highp float; - precision highp int; - precision highp sampler2D; - ${s.varyingFrag} vec2 TexCoords; - ${s.outputDeclaration} - const vec2 halfCR = vec2(0.5, 0.5); - - // Custom vector types to handle higher dimenalities. 
- struct ivec5 - { - int x; - int y; - int z; - int w; - int u; - }; - - struct ivec6 - { - int x; - int y; - int z; - int w; - int u; - int v; - }; - - int imod(int x, int y) { - return x - y * (x / y); - } - - `},n.getDefaultFragShaderMain=function(p,s){return` - void main() { - int indices[${s}]; - toVec(TexCoords, indices); - vec4 result = vec4(process(indices)); - ${c(p).output} = result; - } - `}},3891:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.VecGlslLib=void 0;const u=a(8520);class c extends u.GlslLib{constructor(s){super(s)}getCustomTypes(){return{}}getFunctions(){return Object.assign(Object.assign(Object.assign(Object.assign({},this.binaryVecFunctions()),this.copyVec()),this.setVecItem()),this.getVecItem())}binaryVecFunctions(){const s=this.context.outputTextureLayout.shape.length,h={add:"+=",sub:"-=",mul:"*=",div:"/="},f={};for(const l in h){const o=`${l}Vec`;let t="";for(let r=0;r{Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLInferenceHandler=void 0;const u=a(6231),c=a(9162),p=a(2517),s=a(2403),h=a(7019),f=a(8710),l=a(5611),o=a(4057),t=a(2039);n.WebGLInferenceHandler=class{constructor(e){this.session=e,this.packedTextureDataCache=new Map,this.unpackedTextureDataCache=new Map}calculateTextureWidthAndHeight(e,r){return(0,o.calculateTextureWidthAndHeight)(this.session.layoutStrategy,e,r)}executeProgram(e,r){if(r.length{const S=w.map(O=>`${O.unpackedShape.join(",")};${O.width}x${O.height}`).join("_");let A=v.name;return v.cacheHint&&(A+="["+v.cacheHint+"]"),A+=":"+S,A})(e,i);let g=this.session.programManager.getArtifact(d);const m=g?g.programInfo:typeof e.get=="function"?e.get():e,b=(0,o.createTextureLayoutFromTextureType)(this.session.layoutStrategy,m.output.dims,m.output.textureType),_=this.createTextureData(b,m.output.type);return g||(g=this.session.programManager.build(m,i,_),this.session.programManager.setArtifact(d,g)),this.runProgram(g,i,_),_}run(e,r){return this.executeProgram(e,r).tensor}runProgram(e,r,i){for(let d=0;dthis.readTexture(m),async b=>this.readTextureAsync(m),void 0,g),texture:i});return this.setTextureData(m.tensor.dataId,m,e.isPacked),m}getTextureData(e,r=!1){return this.session.isInitializer(e)?this.session.getTextureData(e,r):r?this.packedTextureDataCache.get(e):this.unpackedTextureDataCache.get(e)}setTextureData(e,r,i=!1){this.session.isInitializer(e)?this.session.setTextureData(e,r,i):(i?this.packedTextureDataCache:this.unpackedTextureDataCache).set(e,r)}isTextureLayoutCached(e,r=!1){return!!this.getTextureData(e.dataId,r)}dispose(){this.session.textureManager.clearActiveTextures(),this.packedTextureDataCache.forEach(e=>this.session.textureManager.releaseTexture(e)),this.packedTextureDataCache=new Map,this.unpackedTextureDataCache.forEach(e=>this.session.textureManager.releaseTexture(e)),this.unpackedTextureDataCache=new Map}readTexture(e){return e.isPacked?this.readTexture(this.unpack(e)):this.session.backend.glContext.isFloat32DownloadSupported?this.session.textureManager.readTexture(e,e.tensor.type,e.channels):this.session.textureManager.readUint8TextureAsFloat((0,f.encodeAsUint8)(this,e))}async readTextureAsync(e){return e.isPacked?this.readTextureAsync(this.unpack(e)):this.session.backend.glContext.isFloat32DownloadSupported?this.session.textureManager.readTextureAsync(e,e.tensor.type,e.channels):this.session.textureManager.readUint8TextureAsFloat((0,f.encodeAsUint8)(this,e))}pack(e){return this.executeProgram((0,s.createPackProgramInfoLoader)(this,e.tensor),[e.tensor])}unpack(e){return 
this.executeProgram((0,l.createUnpackProgramInfoLoader)(this,e.tensor),[e.tensor])}}},1640:function(y,n,a){var u=this&&this.__createBinding||(Object.create?function(X,J,ee,ue){ue===void 0&&(ue=ee);var Ae=Object.getOwnPropertyDescriptor(J,ee);Ae&&!("get"in Ae?!J.__esModule:Ae.writable||Ae.configurable)||(Ae={enumerable:!0,get:function(){return J[ee]}}),Object.defineProperty(X,ue,Ae)}:function(X,J,ee,ue){ue===void 0&&(ue=ee),X[ue]=J[ee]}),c=this&&this.__setModuleDefault||(Object.create?function(X,J){Object.defineProperty(X,"default",{enumerable:!0,value:J})}:function(X,J){X.default=J}),p=this&&this.__importStar||function(X){if(X&&X.__esModule)return X;var J={};if(X!=null)for(var ee in X)ee!=="default"&&Object.prototype.hasOwnProperty.call(X,ee)&&u(J,X,ee);return c(J,X),J};Object.defineProperty(n,"__esModule",{value:!0}),n.WEBGL_OP_RESOLVE_RULES=void 0;const s=a(2898),h=p(a(7839)),f=a(4196),l=a(2069),o=a(8138),t=a(9663),e=a(5193),r=a(7992),i=a(1253),d=a(4776),g=a(6572),m=a(3346),b=a(5623),_=a(2870),v=a(2143),w=a(4939),S=a(718),A=a(2268),O=a(8117),x=a(2278),I=a(5524),N=a(5975),B=a(3933),L=a(6558),F=a(5723),H=a(3738),D=p(a(4909)),j=a(8428),Z=a(9793);n.WEBGL_OP_RESOLVE_RULES=[["Abs","","6+",D.abs],["Acos","","7+",D.acos],["Add","","7+",h.add],["And","","7+",h.and],["Asin","","7+",D.asin],["Atan","","7+",D.atan],["AveragePool","","7+",v.averagePool,v.parseAveragePoolAttributes],["BatchNormalization","","7+",s.batchNormalization,s.parseBatchNormalizationAttributes],["Cast","","6+",f.cast,f.parseCastAttributes],["Ceil","","6+",D.ceil],["Clip","","6-10",D.clip,D.parseClipAttributes],["Clip","","11+",D.clipV11],["Concat","","4+",l.concat,l.parseConcatAttributes],["Conv","","1+",o.conv,o.parseConvAttributes],["ConvTranspose","","1+",t.convTranspose,t.parseConvTransposeAttributes],["Cos","","7+",D.cos],["Div","","7+",h.div],["Dropout","","7+",D.identity],["DepthToSpace","","1+",e.depthToSpace,e.parseDepthToSpaceAttributes],["Equal","","7+",h.equal],["Elu","","6+",D.elu,D.parseEluAttributes],["Exp","","6+",D.exp],["Flatten","","1+",r.flatten,r.parseFlattenAttributes],["Floor","","6+",D.floor],["FusedConv","com.microsoft","1+",o.conv,o.parseConvAttributes],["Gather","","1+",i.gather,i.parseGatherAttributes],["Gemm","","7-10",d.gemm,d.parseGemmAttributesV7],["Gemm","","11+",d.gemm,d.parseGemmAttributesV11],["GlobalAveragePool","","1+",v.globalAveragePool,v.parseGlobalAveragePoolAttributes],["GlobalMaxPool","","1+",v.globalMaxPool],["Greater","","7+",h.greater],["Identity","","1+",D.identity],["ImageScaler","","1+",g.imageScaler,g.parseImageScalerAttributes],["InstanceNormalization","","6+",m.instanceNormalization,m.parseInstanceNormalizationAttributes],["LeakyRelu","","6+",D.leakyRelu,D.parseLeakyReluAttributes],["Less","","7+",h.less],["Log","","6+",D.log],["MatMul","","1+",b.matMul,b.parseMatMulAttributes],["MaxPool","","1+",v.maxPool,v.parseMaxPoolAttributes],["Mul","","7+",h.mul],["Neg","","6+",D.neg],["Not","","1+",D.not],["Or","","7+",h.or],["Pad","","2-10",_.padV2,_.parsePadAttributesV2],["Pad","","11+",_.padV11,_.parsePadAttributesV11],["Pow","","7+",h.pow],["PRelu","","7+",h.pRelu],["ReduceLogSum","","1+",w.reduceLogSum,w.parseReduceAttributes],["ReduceMax","","1+",w.reduceMax,w.parseReduceAttributes],["ReduceMean","","1+",w.reduceMean,w.parseReduceAttributes],["ReduceMin","","1+",w.reduceMin,w.parseReduceAttributes],["ReduceProd","","1+",w.reduceProd,w.parseReduceAttributes],["ReduceSum","","1-12",w.reduceSum,w.parseReduceAttributes],["ReduceSumSquare","","1+",w.reduceLogSumSquare,w.parseReduceAt
tributes],["Relu","","6+",D.relu],["Reshape","","5+",S.reshape],["Resize","","10",A.resize,A.parseResizeAttributesV10],["Resize","","11+",A.resize,A.parseResizeAttributesV11],["Shape","","1+",O.shape],["Sigmoid","","6+",D.sigmoid],["Sin","","7+",D.sin],["Slice","","10+",x.sliceV10],["Slice","","1-9",x.slice,x.parseSliceAttributes],["Softmax","","1-12",I.softmax,I.parseSoftmaxAttributes],["Softmax","","13+",I.softmaxV13,I.parseSoftmaxAttributesV13],["Split","","2-12",N.split,N.parseSplitAttributes],["Sqrt","","6+",D.sqrt],["Squeeze","","1-12",B.squeeze,B.parseSqueezeAttributes],["Squeeze","","13+",B.squeezeV13],["Sub","","7+",h.sub],["Sum","","6+",L.sum],["Tan","","7+",D.tan],["Tanh","","6+",D.tanh],["Tile","","6+",F.tile],["Transpose","","1+",H.transpose,H.parseTransposeAttributes],["Upsample","","7-8",Z.upsample,Z.parseUpsampleAttributesV7],["Upsample","","9",Z.upsample,Z.parseUpsampleAttributesV9],["Unsqueeze","","1-12",j.unsqueeze,j.parseUnsqueezeAttributes],["Unsqueeze","","13+",j.unsqueezeV13],["Xor","","7+",h.xor]]},2898:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseBatchNormalizationAttributes=n.batchNormalization=void 0;const u=a(246),c=a(5060),p=a(2039),s={name:"BatchNormalization",inputNames:["A","Scale","B","Mean","Variance"],inputTypes:[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]};n.batchNormalization=(l,o,t)=>(f(o),[l.run(Object.assign(Object.assign({},s),{cacheHint:t.cacheKey,get:()=>h(l,o,t)}),o)]),n.parseBatchNormalizationAttributes=l=>{const o=l.attributes.getFloat("epsilon",1e-5),t=l.attributes.getFloat("momentum",.9),e=l.attributes.getInt("spatial",1);return(0,u.createAttributeWithCacheKey)({epsilon:o,momentum:t,spatial:e})};const h=(l,o,t)=>{const e=(0,c.getGlsl)(l.session.backend.glContext.version),r=o[0].dims.length,[i,d]=l.calculateTextureWidthAndHeight(o[1].dims,p.TextureType.unpacked),g=` - float process(int[${r}] indices) { - vec2 position = offsetToCoords(indices[1], ${i}, ${d}); - float scale = getColorAsFloat(${e.texture2D}(Scale, position)); - float mean = getColorAsFloat(${e.texture2D}(Mean, position)); - float variance = getColorAsFloat(${e.texture2D}(Variance, position)); - float b = getColorAsFloat(${e.texture2D}(B, position)); - - return scale * ( (_A(indices) - mean) / sqrt(variance + float(${t.epsilon})) ) + b; - }`;return Object.assign(Object.assign({},s),{output:{dims:o[0].dims,type:o[0].type,textureType:p.TextureType.unpacked},shaderSource:g})},f=l=>{if(!l||l.length!==5)throw new Error("BatchNormalization requires 5 inputs.");const o=l[0],t=l[1],e=l[2],r=l[3],i=l[4];if(o.dims.length<3||t.dims.length!==1||e.dims.length!==1||r.dims.length!==1||i.dims.length!==1)throw new Error("invalid input shape.");if(t.dims[0]!==o.dims[1]||e.dims[0]!==o.dims[1]||r.dims[0]!==o.dims[1]||i.dims[0]!==o.dims[1])throw new Error("invalid input shape.");if(o.type!=="float32"&&o.type!=="float64"||t.type!=="float32"&&t.type!=="float64"||e.type!=="float32"&&e.type!=="float64"||r.type!=="float32"&&r.type!=="float64"||i.type!=="float32"&&i.type!=="float64")throw new Error("invalid input tensor types.")}},7839:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.xor=n.sub=n.pRelu=n.pow=n.or=n.mul=n.less=n.greater=n.equal=n.div=n.and=n.add=n.glslPRelu=n.glslPow=n.glslXor=n.glslOr=n.glslAnd=n.glslLess=n.glslGreater=n.glslEqual=n.glslSub=n.glslMul=n.glslDiv=n.glslAdd=void 0;const u=a(2517),c=a(8520),p=a(5060),s=a(2039);function h(){const w="add_";return{body:` - float ${w}(float a, 
float b) { - return a + b; - } - vec4 ${w}(vec4 v1, vec4 v2) { - return v1 + v2; - } - `,name:w,type:c.FunctionType.ValueBased}}function f(){const w="div_";return{body:` - float ${w}(float a, float b) { - return a / b; - } - vec4 ${w}(vec4 v1, vec4 v2) { - return v1 / v2; - } - `,name:w,type:c.FunctionType.ValueBased}}function l(){const w="mul_";return{body:` - float ${w}(float a, float b) { - return a * b; - } - vec4 ${w}(vec4 v1, vec4 v2) { - return v1 * v2; - } - `,name:w,type:c.FunctionType.ValueBased}}function o(){const w="sub_";return{body:` - float ${w}(float a, float b) { - return a - b; - } - vec4 ${w}(vec4 v1, vec4 v2) { - return v1 - v2; - } - `,name:w,type:c.FunctionType.ValueBased}}function t(){const w="equal_";return{body:` - float ${w}(float a, float b) { - return float(a == b); - } - vec4 ${w}(vec4 v1, vec4 v2) { - return vec4(equal(v1, v2)); - } - `,name:w,type:c.FunctionType.ValueBased}}function e(){const w="greater_";return{body:` - float ${w}(float a, float b) { - return float(a > b); - } - vec4 ${w}(vec4 v1, vec4 v2) { - return vec4( v1.r > v2.r , - v1.g > v2.g, - v1.b > v2.b, - v1.a > v2.a ); - } - `,name:w,type:c.FunctionType.ValueBased}}function r(){const w="less_";return{body:` - float ${w}(float a, float b) { - return float(a < b); - } - vec4 ${w}(vec4 v1, vec4 v2) { - return vec4( v1.r < v2.r , - v1.g < v2.g, - v1.b < v2.b, - v1.a < v2.a ); - } - `,name:w,type:c.FunctionType.ValueBased}}function i(){const w="and_";return{body:` - float ${w}(float a, float b) { - return float( bool(a) && bool(b) ); - } - vec4 ${w}(vec4 v1, vec4 v2) { - bvec4 b1 = bvec4(v1); - bvec4 b2 = bvec4(v2); - return vec4( b1.r && b2.r , - b1.g && b2.g, - b1.b && b2.b, - b1.a && b2.a ); - } - `,name:w,type:c.FunctionType.ValueBased}}function d(){const w="or_";return{body:` - float ${w}(float a, float b) { - return float( bool(a) || bool(b) ); - } - vec4 ${w}(vec4 v1, vec4 v2) { - bvec4 b1 = bvec4(v1); - bvec4 b2 = bvec4(v2); - return vec4( b1.r || b2.r , - b1.g || b2.g, - b1.b || b2.b, - b1.a || b2.a ); - } - `,name:w,type:c.FunctionType.ValueBased}}function g(){const w="xor_";return{body:` - float ${w}(float a, float b) { - return float( bool(a) ^^ bool(b) ); - } - vec4 ${w}(vec4 v1, vec4 v2) { - bvec4 b1 = bvec4(v1); - bvec4 b2 = bvec4(v2); - return vec4( b1.r ^^ b2.r , - b1.g ^^ b2.g, - b1.b ^^ b2.b, - b1.a ^^ b2.a ); - } - `,name:w,type:c.FunctionType.ValueBased}}function m(){return function(w){const S=`${w}_`;return{body:` - float ${S}(float a, float b) { - return ${w}(a, b); - } - vec4 ${S}(vec4 v1, vec4 v2) { - return ${w}(v1, v2); - } - `,name:S,type:c.FunctionType.ValueBased}}("pow")}function b(){const w="prelu_";return{body:` - float ${w}(float a, float b) { - return a < 0.0 ? a * b: a; - } - vec4 ${w}(vec4 v1, vec4 v2) { - return vec4( - v1.r < 0.0 ? v1.r * v2.r: v1.r, - v1.g < 0.0 ? v1.g * v2.g: v1.g, - v1.b < 0.0 ? v1.b * v2.b: v1.b, - v1.a < 0.0 ? 
v1.a * v2.a: v1.a - ); - } - `,name:w,type:c.FunctionType.ValueBased}}n.glslAdd=h,n.glslDiv=f,n.glslMul=l,n.glslSub=o,n.glslEqual=t,n.glslGreater=e,n.glslLess=r,n.glslAnd=i,n.glslOr=d,n.glslXor=g,n.glslPow=m,n.glslPRelu=b;const _=(w,S,A,O=S[0].type,x)=>{const I=w.session.pack?s.TextureType.packed:s.TextureType.unpacked;return{name:A.name,inputNames:["A","B"],inputTypes:[I,I],cacheHint:x,get:()=>v(w,S,A,O)}},v=(w,S,A,O=S[0].type)=>{const x=w.session.pack?s.TextureType.packed:s.TextureType.unpacked,I=!u.ShapeUtil.areEqual(S[0].dims,S[1].dims);let N=S[0].dims;const B=w.session.pack;if(I){const H=u.BroadcastUtil.calcShape(S[0].dims,S[1].dims,!1);if(!H)throw new Error("Can't perform binary op on the given tensors");N=H;const D=N.length,j=S[0].dims.length!==0?S[0].dims.length:1,Z=S[1].dims.length!==0?S[1].dims.length:1,X=S[0].dims.length!==0?"bcastIndices_A(indices, aindices);":"aindices[0] = 0;",J=S[1].dims.length!==0?"bcastIndices_B(indices, bindices);":"bindices[0] = 0;",ee=(0,p.getGlsl)(w.session.backend.glContext.version),ue=B?` - ${A.body} - void main() { - vec4 a = getAAtOutCoords(); - vec4 b = getBAtOutCoords(); - vec4 result = ${A.name}(a, b); - ${ee.output} = result; - }`:` - ${A.body} - float process(int indices[${D}]) { - int aindices[${j}]; - int bindices[${Z}]; - ${X} - ${J} - return ${A.name}(_A(aindices), _B(bindices)); - }`;return{name:A.name,inputNames:["A","B"],inputTypes:[x,x],output:{dims:N,type:O,textureType:x},shaderSource:ue,hasMain:B}}const L=(0,p.getGlsl)(w.session.backend.glContext.version),F=` - ${A.body} - void main() { - vec4 v1 = ${L.texture2D}(A, TexCoords); - vec4 v2 = ${L.texture2D}(B, TexCoords); - vec4 result = ${A.name}(v1, v2); - ${L.output} = result; - } - `;return{name:A.name,inputNames:["A","B"],inputTypes:[x,x],output:{dims:S[0].dims,type:O,textureType:x},shaderSource:F,hasMain:!0}};n.add=(w,S)=>[w.run(_(w,S,h()),S)],n.and=(w,S)=>[w.run(_(w,S,i(),"bool"),S)],n.div=(w,S)=>[w.run(_(w,S,f()),S)],n.equal=(w,S)=>[w.run(_(w,S,t(),"bool"),S)],n.greater=(w,S)=>[w.run(_(w,S,e(),"bool"),S)],n.less=(w,S)=>[w.run(_(w,S,r(),"bool"),S)],n.mul=(w,S)=>[w.run(_(w,S,l()),S)],n.or=(w,S)=>[w.run(_(w,S,d(),"bool"),S)],n.pow=(w,S)=>[w.run(_(w,S,m()),S)],n.pRelu=(w,S)=>[w.run(_(w,S,b()),S)],n.sub=(w,S)=>[w.run(_(w,S,o()),S)],n.xor=(w,S)=>[w.run(_(w,S,g(),"bool"),S)]},4196:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseCastAttributes=n.cast=void 0;const u=a(2517);n.cast=(p,s,h)=>(c(s),[p.cast(s[0],h)]),n.parseCastAttributes=p=>u.ProtoUtil.tensorDataTypeFromProto(p.attributes.getInt("to"));const c=p=>{if(!p||p.length!==1)throw new Error("Cast requires 1 input.");if(p[0].type==="string")throw new Error("Invalid input type.")}},1163:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackedConcatProgramInfoLoader=void 0;const u=a(5060),c=a(2039),p=a(9390),s=a(2827);n.createPackedConcatProgramInfoLoader=(f,l,o)=>{const t=(e=l.length,r=o.cacheKey,{name:"Concat (packed)",inputNames:Array.from({length:e},(i,d)=>`X${d}`),inputTypes:Array(e).fill(c.TextureType.packed),cacheHint:r});var e,r;return Object.assign(Object.assign({},t),{get:()=>((i,d,g,m)=>{const b=g[0].dims.slice();if(m>=b.length||m<-1*b.length)throw new Error("axis specified for concat doesn't match input dimensionality");m<0&&(m=b.length+m);const _=b.slice(0);for(let X=1;XX.dims),x=(0,p.getGlChannels)(v),I=new Array(O.length-1);I[0]=O[0][m];for(let X=1;X= ${I[X-1]}) { - return getChannel( - getX${X}(${h(x,N,J)}), - vec2(${h(B,N,J)})); - }`}const H=I.length,D=I[I.length-1];F+=` - return 
getChannel( - getX${H}(${h(x,N,D)}), - vec2(${h(B,N,D)}));`;const j=(0,u.getGlsl)(i.session.backend.glContext.version),Z=` - ${A} - float getValue(${x.map(X=>"int "+X)}) { - ${F} - } - - void main() { - ${S} coords = getOutputCoords(); - int lastDim = coords.${x[v-1]}; - coords.${x[v-1]} = coords.${x[v-2]}; - coords.${x[v-2]} = lastDim; - - vec4 result = vec4(getValue(${w}), 0., 0., 0.); - - ${w[v-1]} = ${w[v-1]} + 1; - if (${w[v-1]} < ${_[v-1]}) { - result.g = getValue(${w}); - } - - ${w[v-2]} = ${w[v-2]} + 1; - if (${w[v-2]} < ${_[v-2]}) { - result.a = getValue(${w}); - } - - ${w[v-1]} = ${w[v-1]} - 1; - if (${w[v-2]} < ${_[v-2]} && - ${w[v-1]} < ${_[v-1]}) { - result.b = getValue(${w}); - } - ${j.output} = result; - } - `;return Object.assign(Object.assign({},d),{output:{dims:_,type:g[0].type,textureType:c.TextureType.packed},shaderSource:Z,hasMain:!0})})(f,t,l,o.axis)})};const h=(f,l,o)=>{const t=f.indexOf(l);return f.map((e,r)=>r===t?`${e} - ${o}`:e).join()}},2069:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseConcatAttributes=n.concat=void 0;const u=a(246),c=a(2039),p=a(1163);n.concat=(e,r,i)=>(t(r),e.session.pack&&r[0].dims.length>1?[e.run((0,p.createPackedConcatProgramInfoLoader)(e,r,i),r)]:[e.run(s(e,r,i),r)]);const s=(e,r,i)=>{const d=(g=r.length,m=i.cacheKey,{name:"Concat",inputNames:Array.from({length:g},(b,_)=>`X${_}`),inputTypes:Array(g).fill(c.TextureType.unpacked),cacheHint:m});var g,m;return Object.assign(Object.assign({},d),{get:()=>((b,_,v,w)=>{const S=v[0].dims.slice();if(w>=S.length||w<-1*S.length)throw new Error("axis specified for concat doesn't match input dimensionality");w<0&&(w=S.length+w);const A=S.slice(0);for(let L=1;L`int getTextureWhereDataResides(int index) { - ${e.map((r,i)=>`if(index<${r}) {return ${i};} -`).join("")} - }`,f=e=>h(e),l=(e,r)=>{const i=[`float fetchDataFromCorrectTexture(int textureIndex, int indices[${r}]) {`];for(let d=0;d{const r=["int getSizeInConcatAxisValueFromIndex(int index) {"];for(let i=0;i(0,u.createAttributeWithCacheKey)({axis:e.attributes.getInt("axis")});const t=e=>{if(!e||e.length<1)throw new Error("too few inputs");const r=e[0].type,i=e[0].dims.length;if(r==="string")throw new Error("string tensor is not supported yet");for(const d of e){if(d.type!==r)throw new Error("input tensors should be one type");if(d.dims.length!==i)throw new Error("input tensors should have the same shape")}}},4770:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createUnpackedGroupedConvProgramInfoLoader=void 0;const u=a(6231),c=a(5060),p=a(2039),s=a(8138),h=a(2823);n.createUnpackedGroupedConvProgramInfoLoader=(f,l,o)=>{const t=(e=l.length>2,r=o.cacheKey,{name:"GroupedConv",inputNames:e?["X","W","Bias"]:["X","W"],inputTypes:e?[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.unpacked],cacheHint:r});var e,r;return Object.assign(Object.assign({},t),{get:()=>((i,d,g,m)=>{const b=d.length>2?"value += getBias(output_channel);":"",_=d[0].dims.slice(),v=d[1].dims.slice(),w=v[0]/m.group;u.Logger.verbose("GroupedConv",`autpPad:${m.autoPad}, dilations:${m.dilations}, group:${m.group}, kernelShape:${m.kernelShape}, pads:${m.pads}, strides:${m.strides}`);const S=(0,s.calculateOutputShape)(_,v,m.dilations,m.pads,m.strides),A=(0,c.getGlsl)(i.session.backend.glContext.version),{activationFunction:O,applyActivation:x}=(0,h.getActivationSnippet)(m),I=` - const ivec2 strides = ivec2(${m.strides[0]}, ${m.strides[1]}); - const ivec2 pads = ivec2(${m.pads[0]}, ${m.pads[1]}); - ${O} 
- void main() { - ivec4 coords = getOutputCoords(); - int batch = coords.x; - int output_channel = coords.y; - ivec2 xRCCorner = coords.zw * strides - pads; - int group_id = output_channel / ${w}; - - float value = 0.0; - for (int wInChannel = 0; wInChannel < ${v[1]}; wInChannel++) { - int input_channel = group_id * ${v[1]} + wInChannel; - for (int wHeight = 0; wHeight < ${v[2]}; wHeight++) { - int xHeight = xRCCorner.x + wHeight * ${m.dilations[0]}; - - if (xHeight < 0 || xHeight >= ${_[2]}) { - continue; - } - - for (int wWidth = 0; wWidth < ${v[3]}; wWidth++) { - int xWidth = xRCCorner.y + wWidth * ${m.dilations[1]}; - if (xWidth < 0 || xWidth >= ${_[3]}) { - continue; - } - - float xVal = getX(batch, input_channel, xWidth, xHeight); - float wVal = getW(output_channel, wInChannel, wWidth, wHeight); - value += xVal*wVal; - } - } - } - ${b} - ${x} - ${A.output} = vec4(value, .0, .0, .0); - } -`;return Object.assign(Object.assign({},g),{output:{dims:S,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:I,hasMain:!0})})(f,l,t,o)})}},1386:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.conv2DPacked=n.conv2DPackedPointwise=void 0;const u=a(8138),c=a(8555),p=a(708);n.conv2DPackedPointwise=(s,h,f)=>{const l=h[0].dims,o=h[1].dims,t=(0,u.calculateOutputShape)(l,o,f.dilations,f.pads,f.strides),e=s.reshapePacked(h[0],[l[1],l[2]*l[3]]),r=s.reshapePacked(h[1],[o[0],o[1]]),i=h.length>2?[r,e,h[2]]:[r,e],d=s.run((0,p.createPackedMatmulProgramInfoLoader)(s,i,f),i);return s.reshapePacked(d,t)},n.conv2DPacked=(s,h,f)=>{const l=h[0].dims,o=h[1].dims,t=(0,u.calculateOutputShape)(l,o,f.dilations,f.pads,f.strides),e=s.run((0,c.createPackedIm2ColProgramInfoLoader)(s,h[0],h[1],t,f),[h[0]]),r=s.reshapePacked(h[1],[o[0],o[1]*o[2]*o[3]]),i=h.length===3?[r,e,h[2]]:[r,e],d=s.run((0,p.createPackedMatmulProgramInfoLoader)(s,i,f),i);return s.reshapePacked(d,t)}},9663:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseConvTransposeAttributes=n.convTranspose=void 0;const u=a(246),c=a(5060),p=a(2039),s=a(2823),h=(r,i,d,g,m,b)=>(r-1)*i+d+(g-1)*m+1-b,f=(r,i,d,g,m)=>{const b=Math.floor(r/2);i==="SAME_UPPER"?(d[g]=b,d[m]=r-b):i==="SAME_LOWER"&&(d[g]=r-b,d[m]=b)};n.convTranspose=(r,i,d)=>(e(i,d),l(r,i,d));const l=(r,i,d)=>{const g=t(d,i);return[o(r,i,g)]},o=(r,i,d)=>r.run(((g,m,b)=>{const _=(v=m.length>2,w=b.cacheKey,{name:"ConvTranspose",inputNames:v?["X","W","B"]:["X","W"],inputTypes:v?[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.unpacked],cacheHint:w});var v,w;return Object.assign(Object.assign({},_),{get:()=>((S,A,O,x)=>{const I=A.length>2?"getB(output_channel)":"0.0",N=A[0].dims,B=A[1].dims,L=B[1],F=B[0]/x.group,H=[A[0].dims[0],A[1].dims[1]*x.group,...x.outputShape],D=(0,c.getGlsl)(S.session.backend.glContext.version),{activationFunction:j,applyActivation:Z}=(0,s.getActivationSnippet)(x),X=` - const ivec2 strides = ivec2(${x.strides[0]}, ${x.strides[1]}); - const ivec2 pads = ivec2(${x.pads[0]}, ${x.pads[1]}); - ${j} - void main() { - ivec4 coords = getOutputCoords(); - int batch = coords.x; - int output_channel = coords.y; - - ivec2 loc = coords.zw + pads; - - int group_id = output_channel / ${L}; - int wOutChannel = output_channel - group_id * ${L}; - - float value = ${I}; - for (int inChannelOffset = 0; inChannelOffset < ${F}; inChannelOffset++) { - int input_channel = group_id * ${F} + inChannelOffset; - for (int wWOff = 0; wWOff < ${B[2]}; wWOff++) { - for (int wHOff = 0; wHOff < ${B[3]}; wHOff++) { - ivec2 wOff = 
ivec2(wWOff * ${x.dilations[0]}, wHOff * ${x.dilations[1]}); - ivec2 wLoc = loc - wOff; - ivec2 wLocIn = wLoc / strides; - if ( - wLocIn * strides == wLoc && - wLocIn.x >= 0 && wLocIn.x < ${N[2]} && - wLocIn.y >= 0 && wLocIn.y < ${N[3]} - ) { - float xVal = getX(batch, input_channel, wLocIn.y, wLocIn.x); - float wVal = getW(input_channel, wOutChannel, wHOff, wWOff); - value += xVal * wVal; - } - } - } - } - ${Z} - ${D.output} = vec4(value, .0, .0, .0); - } -`;return Object.assign(Object.assign({},O),{output:{dims:H,type:A[0].type,textureType:p.TextureType.unpacked},shaderSource:X,hasMain:!0})})(g,m,_,b)})})(r,i,d),i),t=(r,i)=>{const d=r.kernelShape.slice();if(r.kernelShape.length===0)for(let _=2;_{const N=_.length-2,B=I.length===0;for(let L=0;L{const i=r.attributes,d=(0,s.parseInternalActivationAttributes)(i),g=i.getString("auto_pad","NOTSET"),m=i.getInts("dilations",[1,1]),b=i.getInt("group",1),_=i.getInts("kernel_shape",[]),v=i.getInts("output_padding",[0,0]),w=i.getInts("output_shape",[]),S=i.getInts("pads",[0,0,0,0]),A=i.getInts("strides",[1,1]);return(0,u.createAttributeWithCacheKey)(Object.assign({autoPad:g,dilations:m,group:b,kernelShape:_,outputPadding:v,outputShape:w,pads:S,strides:A},d))};const e=(r,i)=>{if(!r||r.length!==2&&r.length!==3)throw new Error("Conv requires 2 or 3 inputs");if(r[0].dims.length!==4||r[1].dims.length!==4)throw new Error("currently only support 2-dimensional conv");if(r[0].dims[1]!==r[1].dims[0])throw new Error("FILTER_IN_CHANNEL should be equal to DATA_CHANNEL");const d=r[1].dims[1]*i.group;if(r.length===3&&(r[2].dims.length!==1||r[2].dims[0]!==d))throw new Error("invalid bias");const g=r[0].dims.length-2;if(i.dilations.length!==g)throw new Error(`dilations should be ${g}D`);if(i.strides.length!==g)throw new Error(`strides should be ${g}D`);if(i.pads.length!==2*g)throw new Error(`pads should be ${2*g}D`);if(i.outputPadding.length!==g)throw new Error(`output_padding should be ${g}D`);if(i.kernelShape.length!==0&&i.kernelShape.length!==r[1].dims.length-2)throw new Error("invalid kernel shape");if(i.outputShape.length!==0&&i.outputShape.length!==r[0].dims.length-2)throw new Error("invalid output shape");if(r[0].type!=="float32"||r[1].type!=="float32")throw new Error("ConvTranspose input(X,W) should be float tensor");if(r.length===3&&r[2].type!=="float32")throw new Error("ConvTranspose input(bias) should be float tensor")}},8138:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseConvAttributes=n.conv=n.calculateOutputShape=void 0;const u=a(246),c=a(2517),p=a(4770),s=a(1386),h=a(9828),f=a(2823),l=a(3248),o=a(5623);n.calculateOutputShape=(g,m,b,_,v)=>{const w=g[0],S=g.slice(2),A=S.length,O=m[0],x=m.slice(2).map((N,B)=>N+(N-1)*(b[B]-1)),I=S.map((N,B)=>N+_[B]+_[B+A]).map((N,B)=>Math.floor((N-x[B]+v[B])/v[B]));return[w,O].concat(...I)},n.conv=(g,m,b)=>(d(m,b),t(g,m,b));const t=(g,m,b)=>{const _=i(b,m),v=g.session.pack,w=_.kernelShape[0]===1&&_.kernelShape[1]===1;return _.group>1?[g.run((0,p.createUnpackedGroupedConvProgramInfoLoader)(g,m,_),m)]:w&&v?[e(g,m,_)]:v&&m[0].dims.length===4&&m[0].dims[0]===1&&!w?[(0,s.conv2DPacked)(g,m,_)]:[r(g,m,_)]},e=(g,m,b)=>{const _=m[0].dims,v=m[1].dims,w=(0,n.calculateOutputShape)(_,v,b.dilations,b.pads,b.strides),S=g.reshapeUnpacked(m[0],[_[1],_[2]*_[3]]),A=g.reshapeUnpacked(m[1],[v[0],v[1]]),O=m.length>2?[A,S,m[2]]:[A,S],x=g.run((0,o.createMatmulProgramInfoLoader)(O,b),O);return g.reshapeUnpacked(x,w)},r=(g,m,b)=>{const 
_=m[0].dims,v=m[1].dims,w=(0,n.calculateOutputShape)(_,v,b.dilations,b.pads,b.strides),S=g.run((0,l.createIm2ColProgramInfoLoader)(g,m[0],m[1],w,b),[m[0]]),A=m.length===3?[S,m[1],m[2]]:[S,m[1]];return g.run((0,h.createDotProductProgramInfoLoader)(g,m,w,b),A)},i=(g,m)=>{const b=g.kernelShape.slice();if(g.kernelShape.length===0)for(let w=2;w{const m=g.attributes,b=(0,f.parseInternalActivationAttributes)(m),_=m.getString("auto_pad","NOTSET"),v=m.getInts("dilations",[1,1]),w=m.getInt("group",1),S=m.getInts("kernel_shape",[]),A=m.getInts("pads",[0,0,0,0]),O=m.getInts("strides",[1,1]);return(0,u.createAttributeWithCacheKey)(Object.assign({autoPad:_,dilations:v,group:w,kernelShape:S,pads:A,strides:O},b))};const d=(g,m)=>{if(!g||g.length!==2&&g.length!==3)throw new Error("Conv requires 2 or 3 inputs");if(g[0].dims.length!==4||g[1].dims.length!==4)throw new Error("currently only support 2-dimensional conv");if(g[0].dims[1]!==g[1].dims[1]*m.group)throw new Error("FILTER_IN_CHANNEL should be equal to DATA_CHANNEL");if(g.length===3&&(g[2].dims.length!==1||g[1].dims[0]!==g[2].dims[0]))throw new Error("invalid bias");const b=g[0].dims.length-2;if(m.dilations.length!==b)throw new Error(`dilations should be ${b}D`);if(m.strides.length!==b)throw new Error(`strides should be ${b}D`);if(m.pads.length!==2*b)throw new Error(`pads should be ${2*b}D`);if(m.kernelShape.length!==0&&m.kernelShape.length!==g[1].dims.length-2)throw new Error("invalid kernel shape");if(g[0].type!=="float32"||g[1].type!=="float32")throw new Error("Conv input(X,W) should be float tensor");if(g.length===3&&g[2].type!=="float32")throw new Error("Conv input(bias) should be float tensor")}},5193:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseDepthToSpaceAttributes=n.depthToSpace=void 0;const u=a(3738);n.depthToSpace=(p,s,h)=>{c(s);const f=h.blocksize,l=f*f,o=h.mode==="DCR"?[0,3,4,1,5,2]:[0,1,4,2,5,3],t=h.mode==="DCR"?[s[0].dims[0],f,f,s[0].dims[1]/l,s[0].dims[2],s[0].dims[3]]:[s[0].dims[0],s[0].dims[1]/l,f,f,s[0].dims[2],s[0].dims[3]],e=p.reshapeUnpacked(s[0],t),r={perm:o,cacheKey:`${o}`},[i]=(0,u.transpose)(p,[e],r),d=[s[0].dims[0],s[0].dims[1]/l,s[0].dims[2]*f,s[0].dims[3]*f];return[p.reshapeUnpacked(i,d)]},n.parseDepthToSpaceAttributes=p=>{const s=p.attributes.getInt("blocksize");if(s<1)throw new Error(`blocksize must be >= 1, but got : ${s} for DepthToSpace`);const h=p.attributes.getString("mode","DCR");if(h!=="DCR"&&h!=="CRD")throw new Error(`unrecognized mode: ${h} for DepthToSpace`);return{mode:h,blocksize:s}};const c=p=>{if(p.length!==1)throw new Error(`DepthToSpace expect 1 inputs, but got ${p.length}`);if(p[0].type==="string"||p[0].dims.length!==4)throw new TypeError("DepthToSpace input should be a 4-D numeric tensor")}},9828:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createDotProductProgramInfoLoader=void 0;const u=a(2517),c=a(5060),p=a(2039),s=a(2823),h=a(3248);n.createDotProductProgramInfoLoader=(f,l,o,t)=>{const e=((r,i)=>({name:"ConvDotProduct",inputNames:r?["Im2Col","K","B"]:["Im2Col","K"],inputTypes:r?[p.TextureType.unpacked,p.TextureType.packedLastDimension,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.packedLastDimension],cacheKey:i.activationCacheKey}))(l.length>2,t);return Object.assign(Object.assign({},e),{get:()=>((r,i,d,g,m)=>{const 
b=d[0].dims,_=d[1].dims,v=[_[0],Math.ceil(b[1]*_[2]*_[3]/4)],w=(0,h.calculateIm2ColDims)(b,_,g),[S,A]=r.calculateTextureWidthAndHeight(v,p.TextureType.packedLastDimension),O=u.ShapeUtil.computeStrides(w),[x,I]=r.calculateTextureWidthAndHeight(w,p.TextureType.packedLastDimension),N=g.length,B=d.length<3?"0.0":"_B(b)",L=Math.ceil(b[1]*_[2]*_[3]/4),{activationFunction:F,applyActivation:H}=(0,s.getActivationSnippet)(m),D=(0,c.getGlsl)(r.session.backend.glContext.version),j=` -${F} -float process(int indices[${N}]) { - int b[1]; - b[0] = indices[1]; - int im2col[4]; - im2col[0] = indices[0]; - im2col[1] = indices[2]; - im2col[2] = indices[3]; - int im2colOffset = im2col[0] * ${O[0]} + im2col[1] * ${O[1]} + im2col[2] * ${O[2]}; - int kernelOffset = indices[1] * ${v[1]}; - float value = ${B}; - for (int i = 0; i < ${L}; ++i) { - vec2 im2colCoords = offsetToCoords(im2colOffset, ${x}, ${I}); - vec2 kernelCoords = offsetToCoords(kernelOffset, ${S}, ${A}); - value += dot(${D.texture2D}(Im2Col, im2colCoords), ${D.texture2D}(K, kernelCoords)); - ++im2colOffset; - ++kernelOffset; - } - ${H} - return value; -}`;return Object.assign(Object.assign({},i),{output:{dims:g,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:j})})(f,e,l,o,t)})}},7992:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseFlattenAttributes=n.flatten=void 0;const u=a(2517);n.flatten=(p,s,h)=>{c(s,h);const f=u.ShapeUtil.flattenShape(s[0].dims,h);return[p.reshapeUnpacked(s[0],f)]},n.parseFlattenAttributes=p=>p.attributes.getInt("axis",1);const c=(p,s)=>{if(!p||p.length!==1)throw new Error("Flatten requires 1 input.");const h=p[0].dims.length;if(h===0)throw new Error("scalar tensor is not supported.");if(s<-h||s>h)throw new Error("Invalid axis");if(p[0].type==="string")throw new Error("string tensor is not supported.")}},2823:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseInternalActivationAttributes=n.getActivationSnippet=void 0;const u=a(2517),c=a(4909);n.getActivationSnippet=function(p){let s;switch(p.activation){case"Relu":s=(0,c.glslRelu)();break;case"Sigmoid":s=(0,c.glslSigmoid)();break;case"Clip":s=(0,c.glslClip)(p.clipMin,p.clipMax);break;default:return{activationFunction:"",applyActivation:""}}const h=s.name;return{activationFunction:s.body,applyActivation:`value = ${h}_(value);`}},n.parseInternalActivationAttributes=p=>{const s=p.getString("activation","");if(s==="Clip"){const[h,f]=p.getFloats("activation_params",[u.MIN_CLIP,u.MAX_CLIP]);return{activation:s,clipMax:f,clipMin:h,activationCacheKey:`${s}:${h},${f}`}}return{activation:s,activationCacheKey:s}}},1253:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseGatherAttributes=n.gather=void 0;const u=a(246),c=a(782),p=a(2517),s=a(2039);n.gather=(o,t,e)=>(l(t,e.axis),[o.run(f(o,t,e),t)]),n.parseGatherAttributes=o=>(0,u.createAttributeWithCacheKey)({axis:o.attributes.getInt("axis",0)});const h={name:"Gather",inputNames:["A","B"],inputTypes:[s.TextureType.unpacked,s.TextureType.unpacked]},f=(o,t,e)=>{const r=Object.assign(Object.assign({},h),{cacheHint:e.cacheKey});return Object.assign(Object.assign({},r),{get:()=>((i,d,g,m)=>{const b=g[0].dims.slice(),_=g[1].dims.slice(),v=new Array(b.length+_.length-1);m=p.ShapeUtil.normalizeAxis(m,b.length);const w=[];for(let A=0;A{if(!o||o.length!==2)throw new Error("Gather requires 2 inputs.");const e=o[0].dims.length;if(e<1)throw new Error("Invalid input shape.");if(t<-e||t>e-1)throw new Error("Invalid axis.");if(c.NUMBER_TYPES.indexOf(o[0].type)===-1)throw new Error("Invaid 
input type.");if(o[1].type!=="int32"&&o[1].type!=="int16")throw new Error("Invaid input type.")}},4776:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseGemmAttributesV11=n.parseGemmAttributesV7=n.gemm=void 0;const u=a(246),c=a(2517),p=a(2039);n.gemm=(o,t,e)=>(l(t,e),[o.run(h(t,e),t)]);const s=(o,t)=>{const e=o.attributes.getInt("transA",0)!==0,r=o.attributes.getInt("transB",0)!==0,i=o.attributes.getFloat("alpha",1),d=o.attributes.getFloat("beta",1);return(0,u.createAttributeWithCacheKey)({transA:e,transB:r,alpha:i,beta:d,isOptionalC:t})};n.parseGemmAttributesV7=o=>s(o,!1),n.parseGemmAttributesV11=o=>s(o,!0);const h=(o,t)=>{const e={name:"Gemm",inputNames:o.length===3?["A","B","C"]:["A","B"],inputTypes:o.length===3?[p.TextureType.unpacked,p.TextureType.unpacked,p.TextureType.unpacked]:[p.TextureType.unpacked,p.TextureType.unpacked],key:t.cacheKey};return Object.assign(Object.assign({},e),{get:()=>f(e,o,t)})},f=(o,t,e)=>{const r=t[0].dims.slice(),i=t[1].dims.slice(),[d,g]=c.GemmUtil.getShapeOfGemmResult(r,e.transA,i,e.transB,t.length===3?t[2].dims:void 0),m=[d,g];if(!m)throw new Error("Can't use gemm on the given tensors");let b=r[r.length-1],_="";e.transA&&(b=r[0]),e.transA&&e.transB?_="value += _A_T(a) * _B_T(b);":e.transA&&!e.transB?_="value += _A_T(a) * _B(b);":!e.transA&&e.transB?_="value += _A(a) * _B_T(b);":e.transA||e.transB||(_="value += _A(a) * _B(b);");const v=m.length,w=` - float process(int indices[${v}]) { - int a[${v}]; - int b[${v}]; - ${t.length===3?`int c[${t[2].dims.length}];`:""} - - copyVec(indices, a); - copyVec(indices, b); - ${t.length===3?"bcastIndices_C(indices, c);":""} - - float value = 0.0; - for (int k=0; k<${b}; ++k) { - a[${v-1}] = k; - b[${v-2}] = k; - ${_} - } - - value = value * alpha; - ${t.length===3?"value += beta * _C(c);":""} - return value; - }`;return Object.assign(Object.assign({},o),{output:{dims:m,type:t[0].type,textureType:p.TextureType.unpacked},variables:[{name:"alpha",type:"float",data:e.alpha},{name:"beta",type:"float",data:e.beta}],shaderSource:w})},l=(o,t)=>{if(!o)throw new Error("Input is missing");if(t.isOptionalC&&(o.length<2||o.length>3))throw new Error("Invaid input shape.");if(!t.isOptionalC&&o.length!==3)throw new Error("Gemm requires 3 inputs");if(o.length===3&&o[2].dims.length!==1&&o[2].dims.length!==2)throw new Error("Invalid input shape of C");if(o[0].type!=="float32"&&o[0].type!=="float64"||o[1].type!=="float32"&&o[1].type!=="float64"||o.length===3&&o[2].type!=="float32"&&o[2].type!=="float64")throw new Error("Invalid input type.");if(o[0].type!==o[1].type||o.length===3&&o[0].type!==o[2].type)throw new Error("Input types are mismatched")}},8555:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackedIm2ColProgramInfoLoader=void 0;const u=a(5060),c=a(2039),p=a(2827);n.createPackedIm2ColProgramInfoLoader=(s,h,f,l,o)=>{const t=(e=o.cacheKey,{name:"Im2Col (packed)",inputNames:["A"],inputTypes:[c.TextureType.packed],cacheHint:e});var e;return Object.assign(Object.assign({},t),{get:()=>((r,i,d,g,m,b)=>{const _=d.dims,v=g.dims,w=m.length,S=[v[1]*v[2]*v[3],m[2]*m[3]],A=v[2]*v[3],O=(0,p.unpackFromChannel)(),x=(0,u.getGlsl)(r.session.backend.glContext.version);let I="";for(let B=0;B<=1;B++)for(let L=0;L<=1;L++)I+=` - blockIndex = rc.x + ${L}; - pos = rc.y + ${B}; - - if(blockIndex < ${S[1]} && pos < ${S[0]}) { - offsetY = int(blockIndex / (${m[w-1]})) * ${b.strides[0]} - - ${b.pads[0]}; - d0 = offsetY + ${b.dilations[0]} * (imod(pos, ${A}) / ${v[2]}); - - if(d0 < ${_[2]} && d0 >= 0) { - offsetX = 
imod(blockIndex, ${m[w-1]}) * ${b.strides[1]} - - ${b.pads[1]}; - d1 = offsetX + ${b.dilations[1]} * imod(imod(pos, ${A}), ${v[2]}); - - if(d1 < ${_[3]} && d1 >= 0) { - - ch = int(float(pos)/ ${A}.); - innerDims = vec2(d0, d1); - result[${2*B+L}] = getChannel( - getA(0, ch, int(innerDims.x), - int(innerDims.y)), innerDims); - } - } - } - - `;const N=` - ${O} - - void main() { - ivec2 rc = getOutputCoords(); - vec4 result = vec4(0.0); - int blockIndex, pos, offsetY, d0, offsetX, d1, ch; - vec2 innerDims; - ${I} - ${x.output} = result; - } - `;return Object.assign(Object.assign({},i),{output:{dims:S,type:d.type,textureType:c.TextureType.packed},shaderSource:N,hasMain:!0})})(s,t,h,f,l,o)})}},3248:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.calculateIm2ColDims=n.createIm2ColProgramInfoLoader=void 0;const u=a(2039);n.createIm2ColProgramInfoLoader=(c,p,s,h,f)=>{const l=(o=f.cacheKey,{name:"Im2Col",inputNames:["X"],inputTypes:[u.TextureType.unpacked],cacheHint:o});var o;return Object.assign(Object.assign({},l),{get:()=>((t,e,r,i,d,g)=>{const m=r.dims,b=i.dims,_=d.length,v=(0,n.calculateIm2ColDims)(m,b,d,4),w=` - const int XC = ${m[1]}; - const int XH = ${m[2]}; - const int XW = ${m[3]}; - const int KH = ${g.kernelShape[0]}; - const int KW = ${g.kernelShape[1]}; - const int dilationH = ${g.dilations[0]}; - const int dilationW = ${g.dilations[1]}; - const int strideH = ${g.strides[0]}; - const int strideW = ${g.strides[1]}; - const int padH = ${g.pads[0]}; - const int padW = ${g.pads[1]}; - const int KHKW = KH*KW; - const int XCKHKW = XC * KHKW; - const int outputChannels = 4; - vec4 process(int indices[${_}]) { - int b = indices[0]; // batch size - int oh = indices[1] * strideH - padH; //output height - int ow = indices[2] * strideW - padW; //output width - int p = indices[3] * outputChannels; //patch - vec4 value = vec4(0.0); - for(int i=0; i < outputChannels; ++i) { - if(p < XCKHKW) { - int patchC = p / KHKW; - int patchH = (p - patchC*KHKW) / KW; - int patchW = (p - patchC*KHKW) - patchH * KW; - int xh2 = oh + patchH * dilationH; - int xw2 = ow + patchW * dilationW; - int x[${m.length}]; - x[0] = b; - x[1] = patchC; - x[2] = xh2; - x[3] = xw2; - if(xh2 >= 0 && - xh2 < XH && - xw2 >= 0 && - xw2 < XW) { - value[i] = _X(x); - } - } - ++p; - } - return value; - } - `;return Object.assign(Object.assign({},e),{output:{dims:v,type:r.type,textureType:u.TextureType.packedLastDimension},shaderSource:w})})(0,l,p,s,h,f)})},n.calculateIm2ColDims=(c,p,s,h=4)=>[s[0],s[2],s[3],Math.ceil(c[1]*p[2]*p[3]/h)]},6572:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseImageScalerAttributes=n.imageScaler=void 0;const u=a(246),c=a(2039);n.imageScaler=(l,o,t)=>(f(o),[l.run(s(l,o,t),o)]),n.parseImageScalerAttributes=l=>{const o=l.attributes.getFloat("scale"),t=l.attributes.getFloats("bias");return(0,u.createAttributeWithCacheKey)({scale:o,bias:t})};const p={name:"ImageScaler",inputNames:["X"],inputTypes:[c.TextureType.unpacked]},s=(l,o,t)=>{const e=Object.assign(Object.assign({},p),{cacheHint:t.cacheKey});return Object.assign(Object.assign({},e),{get:()=>((r,i,d,g)=>{const m=d[0].dims.slice(),b=m.length,_=` - ${h(g.bias.length)} - float process(int indices[${b}]) { - return _X(indices) * scale + getBias(bias, indices[1]); - }`;return 
Object.assign(Object.assign({},i),{output:{dims:m,type:d[0].type,textureType:c.TextureType.unpacked},variables:[{name:"bias",type:"float",arrayLength:g.bias.length,data:g.bias},{name:"scale",type:"float",data:g.scale}],shaderSource:_})})(0,e,o,t)})},h=l=>{const o=[`float getBias(float bias[${l}], int channel) {`];for(let t=0;t{if(!l||l.length!==1)throw new Error("ImageScaler requires 1 input.");if(l[0].dims.length!==4)throw new Error("Invalid input shape.");if(l[0].type!=="float32"&&l[0].type!=="float64")throw new Error("Invalid input type.")}},3346:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseInstanceNormalizationAttributes=n.instanceNormalization=void 0;const u=a(5060),c=a(2039);n.instanceNormalization=(o,t,e)=>{l(t);const r=o.run(s(t[0]),t);return[o.run(f(o,t[0],e,r.dims),[t[0],r,t[1],t[2]])]},n.parseInstanceNormalizationAttributes=o=>o.attributes.getFloat("epsilon",1e-5);const p={name:"InstanceNormalization_MeanAndVariance",inputNames:["X"],inputTypes:[c.TextureType.unpacked]},s=o=>Object.assign(Object.assign({},p),{get:()=>((t,e)=>{const r=e.dims.slice(),i=r[1],d=r[2]*r[3],g=[r[0],i],m=` - vec4 process(int[2] indices) { - vec4 v = vec4(0.0); - int a[4]; - a[0] = indices[0]; - a[1] = indices[1]; - float temp = 0.0; - for(int a2=0; a2<${r[2]}; a2++) { - a[2] = a2; - for(int a3=0; a3<${r[3]}; a3++) { - a[3] = a3; - float x = _X(a); - temp += x; - } - } - float mean = temp / float(${d}); - temp = 0.0; - for(int a2=0; a2<${r[2]}; a2++) { - a[2] = a2; - for(int a3=0; a3<${r[3]}; a3++) { - a[3] = a3; - float x = _X(a); - temp += (x - mean) * (x - mean); - } - } - v.r = mean; - v.g = temp / float(${d}); - - return v; - }`;return Object.assign(Object.assign({},t),{output:{dims:g,type:e.type,textureType:c.TextureType.packedLastDimension},shaderSource:m})})(p,o)}),h={name:"InstanceNormalization_ComputeOutput",inputNames:["X","MeanAndVariance","Scale","B"],inputTypes:[c.TextureType.unpacked,c.TextureType.packedLastDimension,c.TextureType.unpacked,c.TextureType.unpacked]},f=(o,t,e,r)=>{const i=Object.assign(Object.assign({},h),{cacheHint:`${e}`});return Object.assign(Object.assign({},i),{get:()=>((d,g,m,b,_)=>{const v=(0,u.getGlsl)(d.session.backend.glContext.version),[w,S]=d.calculateTextureWidthAndHeight(_,c.TextureType.packedLastDimension),[A,O]=[w/4,S],x=` - vec4 get_MeanAndVariance(int[2] mv) { - int offset = indicesToOffset_MeanAndVariance(mv); - vec2 coords = offsetToCoords(offset, ${A}, ${O}); - return ${v.texture2D}(MeanAndVariance, coords); - } - - float process(int[4] indices) { - int mv[2]; - mv[0] = indices[0]; - mv[1] = indices[1]; - vec4 mean_and_variance = get_MeanAndVariance(mv); - float mean = mean_and_variance.r; - float variance = mean_and_variance.g; - - int sb[1]; - sb[0] = indices[1]; - float scale = _Scale(sb); - float b = _B(sb); - - return scale * (_X(indices) - mean) / sqrt(variance + epsilon) + b; - }`;return Object.assign(Object.assign({},g),{output:{dims:m.dims,type:m.type,textureType:c.TextureType.unpacked},variables:[{name:"epsilon",type:"float",data:b}],shaderSource:x})})(o,i,t,e,r)})},l=o=>{if(!o||o.length!==3)throw new Error("InstanceNormalization requires 3 inputs.");const t=o[0],e=o[1],r=o[2];if(t.dims.length<3||e.dims.length!==1||r.dims.length!==1)throw new Error("Invalid input shape.");if(e.dims[0]!==t.dims[1]||r.dims[0]!==t.dims[1])throw new Error("Input shapes are mismatched.");if(t.type!=="float32"&&t.type!=="float64"||e.type!=="float32"&&e.type!=="float64"||r.type!=="float32"&&r.type!=="float64")throw new Error("Invalid input 
type.");if(o[0].dims.length!==4)throw new Error("Only support 4-D input shape.")}},708:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackedMatmulProgramInfoLoader=void 0;const u=a(2517),c=a(5060),p=a(2039),s=a(9390),h=a(2823),f=a(5623);n.createPackedMatmulProgramInfoLoader=(l,o,t)=>{const e=(r=o.length>2,i=t.activationCacheKey,{name:"MatMul (packed)",inputNames:r?["A","B","Bias"]:["A","B"],inputTypes:r?[p.TextureType.packed,p.TextureType.packed,p.TextureType.packed]:[p.TextureType.packed,p.TextureType.packed],cacheHint:i});var r,i;return Object.assign(Object.assign({},e),{get:()=>((d,g,m,b)=>{const _=m.length>2,v=_?"value += getBiasForMatmul();":"",w=m[0].dims,S=m[1].dims,A=u.BroadcastUtil.calcShape(w,S,!0),O=!u.ShapeUtil.areEqual(m[0].dims,m[1].dims);if(!A)throw new Error("Can't use matmul on the given tensors");const x=w[w.length-1],I=Math.ceil(x/2),N=w.length,B=S.length,L=(0,c.getGlsl)(d.session.backend.glContext.version),F=(0,s.getCoordsDataType)(A.length),H=A.length,D=(0,s.getGlChannels)(),{activationFunction:j,applyActivation:Z}=(0,h.getActivationSnippet)(b),X=_?`${(0,f.getBiasForMatmul)(F,D,m[2].dims,A,!0)}`:"",J=O?`${function(ve,oe,_e,be){let ke=[],Fe=[];const xe=_e[0].dims,Ne=_e[1].dims,Ce=xe.length,Ee=Ne.length,Oe=be.length,Be=Oe-Ce,Ge=Oe-Ee;ke=xe.map((Ie,je)=>`coords.${oe[je+Be]}`),ke[Ce-1]="i*2",ke.join(", "),Fe=Ne.map((Ie,je)=>`coords.${oe[je+Ge]}`),Fe[Ee-2]="i*2",Fe.join(", ");const Ve=u.BroadcastUtil.getBroadcastDims(xe,be),Xe=u.BroadcastUtil.getBroadcastDims(Ne,be),Ze=Ve.map(Ie=>`coords.${oe[Ie+Be]} = 0;`).join(` -`),qe=Xe.map(Ie=>`coords.${oe[Ie+Ge]} = 0;`).join(` -`),Ue=`int lastDim = coords.${oe[Oe-1]}; - coords.${oe[Oe-1]} = coords.${oe[Oe-2]}; - coords.${oe[Oe-2]} = lastDim;`;return` -vec4 getAAtOutCoordsMatmul(int i) { - ${ve} coords = getOutputCoords(); - ${Ue} - ${Ze} - vec4 outputValue = getA(${ke}); - return outputValue; -} - -vec4 getBAtOutCoordsMatmul(int i) { - ${ve} coords = getOutputCoords(); - ${Ue} - ${qe} - vec4 outputValue = getB(${Fe}); - return outputValue; -}`}(F,D,m,A)}`:"",ee=O?"getAAtOutCoordsMatmul(i)":`getA(${function(ve,oe){let _e="";for(let be=0;be{Object.defineProperty(n,"__esModule",{value:!0}),n.getBiasForMatmul=n.createMatmulProgramInfoLoader=n.parseMatMulAttributes=n.matMul=void 0;const u=a(2517),c=a(2039),p=a(9390),s=a(2823),h=a(708);function f(t,e){const r=(i=t.length>2,d=e.activationCacheKey,{name:"MatMul",inputNames:i?["A","B","Bias"]:["A","B"],inputTypes:i?[c.TextureType.unpacked,c.TextureType.unpacked,c.TextureType.unpacked]:[c.TextureType.unpacked,c.TextureType.unpacked],cacheHint:d});var i,d;return Object.assign(Object.assign({},r),{get:()=>function(g,m,b){const _=m[0].dims,v=m[1].dims,w=u.BroadcastUtil.calcShape(_,v,!0);if(!w)throw new Error("Can't use matmul on the given tensors");const S=(0,p.getCoordsDataType)(w.length),A=(0,p.getGlChannels)(),{activationFunction:O,applyActivation:x}=(0,s.getActivationSnippet)(b),I=m.length>2,N=I?"value += getBiasForMatmul();":"",B=I?`${o(S,A,m[2].dims,w,!1)}`:"",L=w.length,F=_.length,H=v.length,D=` - ${O} - ${B} - float process(int indices[${L}]) { - int a[${F}]; - int b[${H}]; - bcastMatmulIndices_A(indices, a); - bcastMatmulIndices_B(indices, b); - - float value; - for (int k=0; k<${_[_.length-1]}; ++k) { - a[${F-1}] = k; - b[${H-2}] = k; - value += _A(a) * _B(b); - } - ${N} - ${x} - return value; - }`;return 
Object.assign(Object.assign({},g),{output:{dims:w,type:m[0].type,textureType:c.TextureType.unpacked},shaderSource:D})}(r,t,e)})}n.matMul=(t,e,r)=>(l(e),t.session.pack?[t.run((0,h.createPackedMatmulProgramInfoLoader)(t,e,r),e)]:[t.run(f(e,r),e)]),n.parseMatMulAttributes=t=>(0,s.parseInternalActivationAttributes)(t.attributes),n.createMatmulProgramInfoLoader=f;const l=t=>{if(!t||t.length!==2)throw new Error("MatMul requires 2 inputs.");if(t[0].dims[t[0].dims.length-1]!==t[1].dims[t[1].dims.length-2])throw new Error("shared dimension does not match.");if(t[0].type!=="float32"&&t[0].type!=="float64"||t[1].type!=="float32"&&t[1].type!=="float64")throw new Error("inputs should be float type");if(t[0].type!==t[1].type)throw new Error("inputs types should match")};function o(t,e,r,i,d){let g="";const m=r.length,b=i.length,_=b-m;g=b<2&&m>0?"coords":r.map((S,A)=>`coords.${e[A+_]}`).join(", ");const v=u.BroadcastUtil.getBroadcastDims(r,i).map(S=>`coords.${e[S+_]} = 0;`).join(` -`);let w="vec4(outputValue.xx, outputValue.yy)";return u.ShapeUtil.size(r)===1&&(w="vec4(outputValue.x)"),d?` -vec4 getBiasForMatmul() { - ${t} coords = getOutputCoords(); - ${v} - vec4 outputValue = getBias(${g}); - return ${w}; -}`:` -float getBiasForMatmul() { - ${t} coords = getOutputCoords(); - ${v} - return getBias(coords.x); -}`}n.getBiasForMatmul=o},2403:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createPackProgramInfoLoader=void 0;const u=a(5060),c=a(2039),p=a(9390),s=a(2827),h={name:"pack",inputNames:["A"],inputTypes:[c.TextureType.unpackedReversed]};n.createPackProgramInfoLoader=(f,l)=>Object.assign(Object.assign({},h),{get:()=>((o,t)=>{const e=(0,u.getGlsl)(o.session.backend.glContext.version),r=t.dims,i=r.length,d=t.dims.length,g=(0,p.getCoordsDataType)(d),m=(0,s.getChannels)("rc",d),b=(_=d,v=m,w=r[r.length-2],S=r[r.length-1],_===0||_===1?"":` - int r = ${v[_-2]}; - int c = ${v[_-1]}; - int rp1 = ${v[_-2]} + 1; - int cp1 = ${v[_-1]} + 1; - bool rEdge = rp1 >= ${S}; - bool cEdge = cp1 >= ${w}; - `);var _,v,w,S;let A;A=i===0?[1,1]:i===1?[r[0],1]:[r[d-1],r[d-2]];const O=function(N,B,L){if(N===0)return"false";if(N===1)return`rc > ${B[0]}`;let F="";for(let H=N-2;H= ${B[H-N+2]}`,H= ${N[0]} ? 0. : getA(rc + 1), - 0, 0`;let F="";if(L>2)for(let H=0;H{Object.defineProperty(n,"__esModule",{value:!0}),n.unpackFromChannel=n.getChannels=n.getVecChannels=void 0;const u=a(9390);function c(p,s){return(0,u.getGlChannels)(s).map(h=>`${p}.${h}`)}n.getVecChannels=c,n.getChannels=function(p,s){return s===1?[p]:c(p,s)},n.unpackFromChannel=function(){return` - float getChannel(vec4 frag, int dim) { - int modCoord = imod(dim, 2); - return modCoord == 0 ? frag.r : frag.g; - } - - float getChannel(vec4 frag, vec2 innerDims) { - vec2 modCoord = mod(innerDims, 2.); - return modCoord.x == 0. ? - (modCoord.y == 0. ? frag.r : frag.g) : - (modCoord.y == 0. ? 
frag.b : frag.a); - } - `}},2870:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parsePadAttributesV11=n.padV11=n.parsePadAttributesV2=n.padV2=void 0;const u=a(246),c=a(2517),p=a(5060),s=a(2039),h={name:"Pad",inputNames:["A"],inputTypes:[s.TextureType.unpacked]};n.padV2=(g,m,b)=>(o(m),[g.run(Object.assign(Object.assign({},h),{cacheHint:b.cacheKey,get:()=>l(g,m[0],b)}),m)]),n.parsePadAttributesV2=g=>{const m=g.attributes.getString("mode","constant"),b=g.attributes.getFloat("value",0),_=g.attributes.getInts("pads");return(0,u.createAttributeWithCacheKey)({mode:m,value:b,pads:_})},n.padV11=(g,m,b)=>{t(m);const _=f(g,m,b);return(0,n.padV2)(g,[m[0]],_)},n.parsePadAttributesV11=g=>g.attributes.getString("mode","constant");const f=(g,m,b)=>{if(!g.session.isInitializer(m[1].dataId)||m.length>=3&&!g.session.isInitializer(m[2].dataId))throw new Error("dynamic pad attributes are not allowed");const _=Array.from(m[1].integerData),v=m.length>=3?m[2].floatData[0]:0;return(0,u.createAttributeWithCacheKey)({mode:b,pads:_,value:v})},l=(g,m,b)=>{const _=c.ShapeUtil.padShape(m.dims.slice(),b.pads),v=_.length,w=` - ${e(g,m,b)} - float process(int[${v}] indices) { - return padA(indices); - }`;return{name:"Pad",inputNames:["A"],inputTypes:[s.TextureType.unpacked],output:{dims:_,type:m.type,textureType:s.TextureType.unpacked},shaderSource:w}},o=g=>{if(!g||g.length!==1)throw new Error("Pad requires 1 input");if(g[0].type!=="float32"&&g[0].type!=="float64")throw new Error("Invalid input type.")},t=g=>{if(!g||g.length!==2&&g.length!==3)throw new Error("Pad requires 2 or 3 inputs");if(g[1].type!=="int32")throw new Error("Invalid input type.");if(g.length>=3&&g[2].type==="string")throw new Error("Invalid input type.")},e=(g,m,b)=>{const _=(0,p.getGlsl)(g.session.backend.glContext.version),[v,w]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),S=c.ShapeUtil.computeStrides(m.dims);switch(b.mode){case"constant":return r(_,m.dims,S,v,w,b.pads,b.value);case"reflect":return i(_,m.dims,S,v,w,b.pads);case"edge":return d(_,m.dims,S,v,w,b.pads);default:throw new Error("Invalid mode")}},r=(g,m,b,_,v,w,S)=>{const A=m.length;let O="";for(let x=A-1;x>=0;--x)O+=` - k = m[${x}] - ${w[x]}; - if (k < 0) return constant; - if (k >= ${m[x]}) return constant; - offset += k * ${b[x]}; - `;return` - float padA(int m[${A}]) { - const float constant = float(${S}); - int offset = 0; - int k = 0; - ${O} - vec2 coords = offsetToCoords(offset, ${_}, ${v}); - float value = getColorAsFloat(${g.texture2D}(A, coords)); - return value; - } - `},i=(g,m,b,_,v,w)=>{const S=m.length;let A="";for(let O=S-1;O>=0;--O)A+=` - k = m[${O}] - ${w[O]}; - if (k < 0) { k = -k; } - { - const int _2n_1 = ${2*(m[O]-1)}; - k = int( mod( float(k), float(_2n_1) ) ) ; - if(k >= ${m[O]}) { k = _2n_1 - k; } - } - offset += k * ${b[O]}; - `;return` - float padA(int m[${S}]) { - int offset = 0; - int k = 0; - ${A} - vec2 coords = offsetToCoords(offset, ${_}, ${v}); - float value = getColorAsFloat(${g.texture2D}(A, coords)); - return value; - } - `},d=(g,m,b,_,v,w)=>{const S=m.length;let A="";for(let O=S-1;O>=0;--O)A+=` - k = m[${O}] - ${w[O]}; - if (k < 0) k = 0; - if (k >= ${m[O]}) k = ${m[O]-1}; - offset += k * ${b[O]}; - `;return` - float padA(int m[${S}]) { - int offset = 0; - int k = 0; - ${A} - vec2 coords = offsetToCoords(offset, ${_}, ${v}); - float value = getColorAsFloat(${g.texture2D}(A, coords)); - return value; - } - 
`}},2143:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.globalMaxPool=n.parseMaxPoolAttributes=n.maxPool=n.parseGlobalAveragePoolAttributes=n.globalAveragePool=n.parseAveragePoolAttributes=n.averagePool=void 0;const u=a(246),c=a(2517),p=a(2039);n.averagePool=(d,g,m)=>{t(g);const b={name:"AveragePool",inputNames:["X"],inputTypes:[p.TextureType.unpacked],cacheHint:m.cacheKey};return[d.run(Object.assign(Object.assign({},b),{get:()=>s(g,b,!1,m)}),g)]},n.parseAveragePoolAttributes=d=>{const g=d.attributes.getString("auto_pad","NOTSET"),m=d.attributes.getInt("ceil_mode",0),b=d.attributes.getInt("count_include_pad",0)!==0,_=d.attributes.getInts("kernel_shape"),v=d.attributes.getInts("strides",[]),w=d.attributes.getInts("pads",[]);if(m!==0)throw new Error("using ceil() in shape computation is not yet supported for AveragePool");return(0,u.createAttributeWithCacheKey)({autoPad:g,ceilMode:m,countIncludePad:b,kernelShape:_,strides:v,pads:w})};const s=(d,g,m,b)=>{const[_,v]=f(d,b,m),w=c.ShapeUtil.size(_.kernelShape);let S="";_.countIncludePad?S+=`value /= float(${w});`:S+=`value /= float(${w} - pad);`;const A=` - ${e(d[0].dims,_,"value += _X(x);",S,"0.0")} - `;return Object.assign(Object.assign({},g),{output:{dims:v,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:A})};n.globalAveragePool=(d,g,m)=>{t(g);const b={name:"GlobalAveragePool",inputNames:["X"],inputTypes:[p.TextureType.unpacked],cacheHint:`${m.countIncludePad}`};return[d.run(Object.assign(Object.assign({},b),{get:()=>s(g,b,!0,m)}),g)]},n.parseGlobalAveragePoolAttributes=d=>{const g=d.attributes.getInt("count_include_pad",0)!==0;return(0,u.createAttributeWithCacheKey)({autoPad:"",ceilMode:0,countIncludePad:g,kernelShape:[],strides:[],pads:[]})},n.maxPool=(d,g,m)=>{t(g);const b={name:"MaxPool",inputNames:["X"],inputTypes:[p.TextureType.unpacked],cacheHint:m.cacheKey};return[d.run(Object.assign(Object.assign({},b),{get:()=>h(g,b,!1,m)}),g)]},n.parseMaxPoolAttributes=d=>{const g=d.attributes.getString("auto_pad","NOTSET"),m=d.attributes.getInt("ceil_mode",0),b=d.attributes.getInts("kernel_shape"),_=d.attributes.getInts("strides",[]),v=d.attributes.getInts("pads",[]),w=d.attributes.getInt("storage_order",0),S=d.attributes.getInts("dilations",[]);if(w!==0)throw new Error("column major storage order is not yet supported for MaxPool");if(m!==0)throw new Error("using ceil() in shape computation is not yet supported for MaxPool");return(0,u.createAttributeWithCacheKey)({autoPad:g,ceilMode:m,countIncludePad:!1,kernelShape:b,strides:_,pads:v,storageOrder:w,dilations:S})};const h=(d,g,m,b)=>{const[_,v]=f(d,b,m),w=` - ${e(d[0].dims,_,` - value = max(_X(x), value); - `,"","-1e5")} - `;return Object.assign(Object.assign({},g),{output:{dims:v,type:d[0].type,textureType:p.TextureType.unpacked},shaderSource:w})},f=(d,g,m)=>{const b=d[0].dims.slice(),_=Object.hasOwnProperty.call(g,"dilations"),v=g.kernelShape.slice(),w=g.strides.slice(),S=_?g.dilations.slice():[],A=g.pads.slice();c.PoolConvUtil.adjustPoolAttributes(m,b,v,w,S,A);const O=c.PoolConvUtil.computePoolOutputShape(m,b,w,S,v,A,g.autoPad),x=Object.assign({},g);return 
_?Object.assign(x,{kernelShape:v,strides:w,pads:A,dilations:S,cacheKey:g.cacheKey}):Object.assign(x,{kernelShape:v,strides:w,pads:A,cacheKey:g.cacheKey}),[x,O]},l={autoPad:"",ceilMode:0,countIncludePad:!1,kernelShape:[],strides:[],pads:[],storageOrder:0,dilations:[],cacheKey:""},o={name:"GlobalMaxPool",inputNames:["X"],inputTypes:[p.TextureType.unpacked]};n.globalMaxPool=(d,g)=>(t(g),[d.run(Object.assign(Object.assign({},o),{get:()=>h(g,o,!0,l)}),g)]);const t=d=>{if(!d||d.length!==1)throw new Error("Pool ops requires 1 input.");if(d[0].type!=="float32"&&d[0].type!=="float64")throw new Error("Invalid input type.")},e=(d,g,m,b,_)=>{const v=d.length;if(g.kernelShape.length<=2){const w=g.kernelShape[g.kernelShape.length-1],S=g.strides[g.strides.length-1],A=g.pads[g.pads.length/2-1],O=g.pads[g.pads.length-1],x=d[v-1];let I="",N="",B="";if(I=A+O!==0?` - for (int i = 0; i < ${w}; i++) { - x[${v} - 1] = indices[${v} - 1] * ${S} - ${A} + i; - if (x[${v} - 1] < 0 || x[${v} - 1] >= ${x}) { - pad++; - continue; - } - ${m} - }`:` - for (int i = 0; i < ${w}; i++) { - x[${v} - 1] = indices[${v} - 1] * ${S} - ${A} + i; - ${m} - }`,g.kernelShape.length===2){const L=g.kernelShape[g.kernelShape.length-2],F=g.strides[g.strides.length-2],H=g.pads[g.pads.length/2-2],D=g.pads[g.pads.length-2],j=d[v-2];N=H+D!==0?` - for (int j = 0; j < ${L}; j++) { - x[${v} - 2] = indices[${v} - 2] * ${F} - ${H} + j; - if (x[${v} - 2] < 0 || x[${v} - 2] >= ${j}) { - pad+= ${w}; - continue; - } - `:` - for (int j = 0; j < ${L}; j++) { - x[${v} - 2] = indices[${v} - 2] * ${F} - ${H} + j; - `,B=` - } - `}return` - float process(int indices[${v}]) { - int x[${v}]; - copyVec(indices, x); - - float value = ${_}; - int pad = 0; - ${N} - ${I} - ${B} - ${b} - return value; - } - `}{const w=c.ShapeUtil.size(g.kernelShape),S=c.ShapeUtil.computeStrides(g.kernelShape),A=S.length,O=g.pads.length,x=i(A),I=r(d,"inputDims"),N=r(g.pads,"pads"),B=r(S,"kernelStrides"),L=r(g.strides,"strides");let F="";return F=g.pads.reduce((H,D)=>H+D)?` - if (x[j] >= inputDims[j] || x[j] < 0) { - pad++; - isPad = true; - break; - } - } - if (!isPad) { - ${m} - }`:` - } - ${m} - `,` - ${x} - float process(int indices[${v}]) { - int x[${v}]; - copyVec(indices, x); - int offset[${A}]; - int pads[${O}]; - int inputDims[${v}]; - int kernelStrides[${A}]; - int strides[${A}]; - ${N} - ${I} - ${L} - ${B} - - float value = ${_}; - int pad = 0; - bool isPad = false; - for (int i = 0; i < ${w}; i++) { - offsetToIndices(i, kernelStrides, offset); - isPad = false; - for (int j = ${v} - ${A}; j < ${v}; j++) { - x[j] = indices[j] * strides[j - ${v} + ${A}] - + offset[j - ${v} + ${A}] - pads[j - 2]; - ${F} - } - ${b} - - return value; - } - `}},r=(d,g)=>{let m="";for(let b=0;b` - void offsetToIndices(int offset, int[${d}] strides, out int[${d}] indices) { - if (${d} == 0) { - return; - } - for (int i = 0; i < ${d} - 1; ++i) { - indices[i] = offset / strides[i]; - offset -= indices[i] * strides[i]; - } - indices[${d} - 1] = offset; - }`},4939:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.reduceLogSumSquare=n.reduceLogSum=n.reduceProd=n.reduceMin=n.reduceMax=n.reduceMean=n.reduceSum=n.parseReduceAttributes=void 0;const u=a(246),c=a(782),p=a(2517),s=a(2039),h=(o,t,e,r,i)=>{l(t);const d={name:r,inputNames:["A"],inputTypes:[s.TextureType.unpacked]};return[o.run(Object.assign(Object.assign({},d),{cacheHint:e.cacheKey,get:()=>f(o,t,e,r,i,d)}),t)]};n.parseReduceAttributes=o=>{const 
t=o.attributes.getInts("axes",[]),e=o.attributes.getInt("keepdims",1)===1;return(0,u.createAttributeWithCacheKey)({axes:t,keepDims:e})};const f=(o,t,e,r,i,d)=>{const g=[],m=t[0].dims.length||1,b=[],_=p.ShapeUtil.normalizeAxes(e.axes,t[0].dims.length),v=i(t,_);let w=v[1];for(let A=0;A=0||_.length===0?(e.keepDims&&g.push(1),w=` - for(int j${A} = 0; j${A} < ${t[0].dims[A]}; j${A}++) { - inputIdx[${A}] = j${A}; - ${w} - }`):(b.push(`inputIdx[${A}] = outputIdx[${g.length}];`),g.push(t[0].dims[A]));const S=` - float process(int outputIdx[${g.length||1}]) { - float value; // final result - int inputIdx[${m}]; // addressing input data - ${b.join(` -`)} - ${v[0]} // init ops for reduce max/min - ${w} - ${v[2]} // final computation for reduce mean - return value; - }`;return Object.assign(Object.assign({},d),{output:{dims:g,type:t[0].type,textureType:s.TextureType.unpacked},shaderSource:S})},l=o=>{if(!o||o.length!==1)throw new Error("Reduce op requires 1 input.");if(c.NUMBER_TYPES.indexOf(o[0].type)===-1)throw new Error("Invalid input type.")};n.reduceSum=(o,t,e)=>h(o,t,e,"ReduceSum",()=>["value = 0.0;","value += _A(inputIdx);",""]),n.reduceMean=(o,t,e)=>h(o,t,e,"ReduceMean",(r,i)=>{let d=1;for(let g=0;g=0||i.length===0)&&(d*=r[0].dims[g]);return["value = 0.0;","value += _A(inputIdx);",`value /= ${d}.;`]}),n.reduceMax=(o,t,e)=>h(o,t,e,"ReduceMax",(r,i)=>{const d=[];for(let g=0;g=0||i.length===0)&&d.push(`inputIdx[${g}] = 0;`);return[`${d.join(` -`)} -value = _A(inputIdx);`,"value = max(value, _A(inputIdx));",""]}),n.reduceMin=(o,t,e)=>h(o,t,e,"ReduceMin",(r,i)=>{const d=[];for(let g=0;g=0||i.length===0)&&d.push(`inputIdx[${g}] = 0;`);return[`${d.join(` -`)} -value = _A(inputIdx);`,"value = min(value, _A(inputIdx));",""]}),n.reduceProd=(o,t,e)=>h(o,t,e,"ReduceProd",()=>["value = 1.0;","value *= _A(inputIdx);",""]),n.reduceLogSum=(o,t,e)=>h(o,t,e,"ReduceLogSum",()=>["value = 0.0;","value += _A(inputIdx);","value = log(value);"]),n.reduceLogSumSquare=(o,t,e)=>h(o,t,e,"ReduceLogSumSquare",()=>["float t; value = 0.0;","t = _A(inputIdx); value += t * t;",""])},7019:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.isReshapeCheap=n.processDims3D=n.createPackedReshape3DProgramInfoLoader=void 0;const u=a(2517),c=a(5060),p=a(2039),s=a(2827);n.createPackedReshape3DProgramInfoLoader=(h,f,l)=>{const o=(t=>({name:"Reshape (packed)",inputTypes:[p.TextureType.packed],inputNames:["A"],cacheHint:`${t}`}))(l);return Object.assign(Object.assign({},o),{get:()=>((t,e,r,i)=>{const d=e.dims,g=i;let m="";for(let v=0;v<4;v++){let w="";switch(v){case 0:w="outputCoords = rc;";break;case 1:w="outputCoords = ivec3(rc.x, rc.y+1, rc.z);";break;case 2:w="outputCoords = ivec3(rc.x, rc.y, rc.z+1);";break;case 3:w="outputCoords = ivec3(rc.x, rc.y+1, rc.z+1);";break;default:throw new Error}m+=` - ${w} - ${v>0?"if(outputCoords.y < rows && outputCoords.z < cols){":""} - int flattenedIndex = getFlattenedIndex(outputCoords); - - ivec3 inputRC = inputCoordsFromReshapedOutCoords(flattenedIndex); - vec2 innerDims = vec2(float(inputRC.y),float(inputRC.z)); - - result[${v}] = getChannel(getA(inputRC.x, inputRC.y, inputRC.z), innerDims); - - ${v>0?"}":""} - `}const b=(0,c.getGlsl)(t.session.backend.glContext.version),_=` - ${function(v){const w=u.ShapeUtil.computeStrides(v),S=["b","r","c"],A="index";return` - ivec3 inputCoordsFromReshapedOutCoords(int index) { - ${w.map((O,x)=>`int ${S[x]} = ${A} / ${O}; ${x===w.length-1?`int ${S[x+1]} = ${A} - ${S[x]} * ${O}`:`index -= ${S[x]} * ${O}`};`).join("")} - return ivec3(b, r, c); - } - 
`}(d)} - ${function(v){const w=u.ShapeUtil.computeStrides(v);return` - int getFlattenedIndex(ivec3 coords) { - // reverse y, z order - return coords.x * ${w[0]} + coords.z * ${w[1]} + coords.y; - } -`}(g)} - ${(0,s.unpackFromChannel)()} - - void main() { - ivec3 rc = getOutputCoords(); - - vec4 result = vec4(0.0); - - ivec3 outputCoords; - int rows = ${g[2]}; - int cols = ${g[1]}; - - ${m} - ${b.output} = result; - } - `;return Object.assign(Object.assign({},r),{output:{dims:g,type:e.type,textureType:p.TextureType.packed},shaderSource:_,hasMain:!0})})(h,f,o,l)})},n.processDims3D=function(h){if(h.length===0)return[1,1,1];let f=1;for(let l=0;l1?h[h.length-2]:1,h[h.length-1]]},n.isReshapeCheap=function(h,f){let l=!1;return l=h.length===0||f.length===0||(h.length<2||f.length<2?h[h.length-1]===f[f.length-1]:h[h.length-1]===f[f.length-1]&&h[h.length-2]===f[f.length-2]),l}},718:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.reshape=void 0;const u=a(2517);n.reshape=(c,p)=>{const s=u.ShapeUtil.calculateReshapedDims(p[0].dims,p[1].integerData);return c.session.pack?[c.reshapePacked(p[0],s)]:[c.reshapeUnpacked(p[0],s)]}},2268:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseResizeAttributesV11=n.parseResizeAttributesV10=n.resize=void 0;const u=a(5060),c=a(2039),p=a(9390),s=a(2827),h=a(9793),f={name:"Resize",inputNames:["A"],inputTypes:[c.TextureType.packed]};n.resize=(r,i,d)=>((0,h.validateInputs)(i,d),[r.run(Object.assign(Object.assign({},f),{cacheHint:d.cacheKey,get:()=>l(r,i,d)}),i)]),n.parseResizeAttributesV10=r=>(0,h.parseUpsampleAttributes)(r,10),n.parseResizeAttributesV11=r=>(0,h.parseUpsampleAttributes)(r,11);const l=(r,i,d)=>{const g=(0,u.getGlsl)(r.session.backend.glContext.version),[m,b]=o(i,d);if(m.every(F=>F===1)&&d.coordinateTransformMode!=="tf_crop_and_resize")return Object.assign(Object.assign({},f),{output:{dims:b,type:i[0].type,textureType:c.TextureType.packed},hasMain:!0,shaderSource:`void main() { - vec4 v = ${g.texture2D}(X, TexCoords); - ${g.output} = v; - }`});const _=b.length;if(_<2)throw new Error(`output dimension should be at least 2, but got ${_}`);const v=b[_-2],w=b[_-1],S=i[0].dims;if(_!==S.length)throw new Error(`output dimension should match input ${S.length}, but got ${_}`);const A=S[_-2],O=S[_-1],x=m[_-2],I=m[_-1];let N="";if(d.mode!=="linear")throw new Error(`resize (packed) does not support mode: '${d.mode}'`);switch(d.coordinateTransformMode){case"asymmetric":N=` - vec4 getSourceFracIndex(ivec4 coords) { - return vec4(coords) / scaleWHWH; - } - `;break;case"half_pixel":N=` - vec4 getSourceFracIndex(ivec4 coords) { - return (vec4(coords) + 0.5) / scaleWHWH - 0.5; - } - `;break;case"pytorch_half_pixel":N=` - vec4 getSourceFracIndex(ivec4 coords) { - vec4 fcoords = vec4(coords); - return vec4( - ${w}.0 > 1.0 ? (fcoords.x + 0.5) / scaleWHWH.x - 0.5 : 0.0, - ${v}.0 > 1.0 ? (fcoords.y + 0.5) / scaleWHWH.y - 0.5 : 0.0, - ${w}.0 > 1.0 ? (fcoords.z + 0.5) / scaleWHWH.z - 0.5 : 0.0, - ${v}.0 > 1.0 ? 
(fcoords.w + 0.5) / scaleWHWH.w - 0.5 : 0.0 - ); - } - `;break;case"align_corners":N=` - vec4 getSourceFracIndex(ivec4 coords) { - vec4 resized = vec4(${w}.0 - 1.0, ${v}.0 - 1.0, ${w}.0 - 1.0, - ${v}.0 - 1.0); - vec4 original = vec4(${O}.0 - 1.0, ${A}.0 - 1.0, ${O}.0 - 1.0, - ${A}.0 - 1.0); - vec4 new_scale = original / resized; - return vec4(coords) * new_scale; - } - `;break;default:throw new Error(`resize (packed) does not support coordinateTransformMode: '${d.coordinateTransformMode}'`)}const B=(0,p.getCoordsDataType)(_),L=` - const vec2 inputWH = vec2(${A}.0, ${O}.0); - const vec4 scaleWHWH = vec4(float(${x}), float(${I}), float(${x}), float(${I})); - ${(0,s.unpackFromChannel)()} - ${N} - float getAValue(int x10, int r, int c, int d) { - return getChannel(getA(x10, r, c, d), vec2(c, d)); - } - void main() { - ${B} rc = getOutputCoords(); - - int batch = rc[0]; - int depth = rc[1]; - - // retrieve the 4 coordinates that is used in the 4 packed output values. - ivec4 coords = ivec4(rc.wz, rc.w + 1, rc.z + 1); - - // calculate the source index in fraction - vec4 sourceFrac = getSourceFracIndex(coords); - - // get the lower and upper bound of the 4 values that will be packed into one texel. - ivec4 x00 = ivec4(max(sourceFrac.xy, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.xy))); - ivec4 x01 = ivec4(max(sourceFrac.xw, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.xw))); - ivec4 x10 = ivec4(max(sourceFrac.zy, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.zy))); - ivec4 x11 = ivec4(max(sourceFrac.zw, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.zw))); - - bool hasNextRow = rc.w < ${v-1}; - bool hasNextCol = rc.z < ${w-1}; - - // pack x00, x01, x10, x11's top-left corner into one vec4 structure - vec4 topLeft = vec4( - getAValue(batch, depth, x00.x, x00.y), - hasNextCol ? getAValue(batch, depth, x01.x, x01.y) : 0.0, - hasNextRow ? getAValue(batch, depth, x10.x, x10.y) : 0.0, - (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.x, x11.y) : 0.0); - - // pack x00, x01, x10, x11's top-right corner into one vec4 structure - vec4 topRight = vec4( - getAValue(batch, depth, x00.x, x00.w), - hasNextCol ? getAValue(batch, depth, x01.x, x01.w) : 0.0, - hasNextRow ? getAValue(batch, depth, x10.x, x10.w) : 0.0, - (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.x, x11.w) : 0.0); - - // pack x00, x01, x10, x11's bottom-left corner into one vec4 structure - vec4 bottomLeft = vec4( - getAValue(batch, depth, x00.z, x00.y), - hasNextCol ? getAValue(batch, depth, x01.z, x01.y) : 0.0, - hasNextRow ? getAValue(batch, depth, x10.z, x10.y) : 0.0, - (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.z, x11.y) : 0.0); - - // pack x00, x01, x10, x11's bottom-right corner into one vec4 structure - vec4 bottomRight = vec4( - getAValue(batch, depth, x00.z, x00.w), - hasNextCol ? getAValue(batch, depth, x01.z, x01.w) : 0.0, - hasNextRow ? getAValue(batch, depth, x10.z, x10.w) : 0.0, - (hasNextRow && hasNextCol) ? 
getAValue(batch, depth, x11.z, x11.w) : 0.0); - - // calculate the interpolation fraction on u and v direction - vec4 frac = vec4(sourceFrac) - floor(sourceFrac); - vec4 clampFrac = clamp(frac, vec4(0.0), vec4(1.0)); - - vec4 top = mix(topLeft, topRight, clampFrac.ywyw); - vec4 bottom = mix(bottomLeft, bottomRight, clampFrac.ywyw); - vec4 newValue = mix(top, bottom, clampFrac.xxzz); - - ${g.output} = vec4(newValue); - } - `;return Object.assign(Object.assign({},f),{output:{dims:b,type:i[0].type,textureType:c.TextureType.packed},hasMain:!0,shaderSource:L})},o=(r,i)=>{const d=r[0].dims;let g,m=i.scales;if(m.length===0){const _=r[i.scalesInputIdx];if(_&&_.size!==0){if(r[i.sizesInputIdx])throw new Error("Only one of scales or sizes must be provided as input.");m=t(_,i.mode,i.isResize)}else{const v=r[i.sizesInputIdx];if(!v||v.size===0)throw new Error("Either scales or sizes MUST be provided as input.");g=Array.from(v.integerData),m=e(g,d,i.mode,i.isResize)}}else if(r[i.sizesInputIdx])throw new Error("Only one of scales or sizes must be provided as input.");const b=g||d.map((_,v)=>Math.floor(_*m[v]));return[m,b]},t=(r,i,d)=>{const g=Array.from(r.floatData);return(0,h.scalesValidation)(g,i,d),g},e=(r,i,d,g)=>{const m=i.length,b=new Array(m);for(let _=0,v=m;_{Object.defineProperty(n,"__esModule",{value:!0}),n.shape=void 0;const u=a(9162);n.shape=(p,s)=>(c(s),[new u.Tensor([s[0].dims.length],"int32",void 0,void 0,new Int32Array(s[0].dims))]);const c=p=>{if(!p||p.length!==1)throw new Error("Shape requires 1 input.")}},2278:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.sliceV10=n.parseSliceAttributes=n.slice=void 0;const u=a(246),c=a(782),p=a(2517),s=a(2039),h={name:"Slice",inputNames:["A"],inputTypes:[s.TextureType.unpacked]};n.slice=(e,r,i)=>(l(r),[e.run(Object.assign(Object.assign({},h),{cacheHint:i.cacheKey,get:()=>f(e,r[0],i)}),r)]),n.parseSliceAttributes=e=>{const r=e.attributes.getInts("starts"),i=e.attributes.getInts("ends"),d=e.attributes.getInts("axes",[]);return(0,u.createAttributeWithCacheKey)({starts:r,ends:i,axes:d})};const f=(e,r,i)=>{const d=i.axes.length===0?r.dims.slice(0).map((S,A)=>A):i.axes,g=p.ShapeUtil.normalizeAxes(d,r.dims.length),m=i.starts.map((S,A)=>S>r.dims[g[A]]-1?r.dims[g[A]]:p.ShapeUtil.normalizeAxis(S,r.dims[g[A]])),b=i.ends.map((S,A)=>S>r.dims[g[A]]-1?r.dims[g[A]]:p.ShapeUtil.normalizeAxis(S,r.dims[g[A]])),_=r.dims.slice(),v=[];for(let S=0;S0&&v.push(`outputIdx[${g[S]}] += ${m[S]};`);const w=` - float process(int outputIdx[${_.length}]) { - ${v.join(` - `)} - return _A(outputIdx); - }`;return Object.assign(Object.assign({},h),{output:{dims:_,type:r.type,textureType:s.TextureType.unpacked},shaderSource:w})},l=e=>{if(!e||e.length!==1)throw new Error("Slice requires 1 input.");if(c.NUMBER_TYPES.indexOf(e[0].type)===-1)throw new Error("Invalid input type.")};n.sliceV10=(e,r)=>{t(r);const i=o(e,r);return[e.run(Object.assign(Object.assign({},h),{cacheHint:i.cacheKey,get:()=>f(e,r[0],i)}),[r[0]])]};const o=(e,r)=>{if(!e.session.isInitializer(r[1].dataId)||!e.session.isInitializer(r[2].dataId)||r.length>=4&&!e.session.isInitializer(r[3].dataId)||r.length>=5&&!e.session.isInitializer(r[4].dataId))throw new Error("dynamic slice attributes are not allowed");if(r.length>=5&&r[4].integerData.some(m=>m!==1))throw new Error("currently non-1 steps is not supported for Slice");const 
i=Array.from(r[1].integerData),d=Array.from(r[2].integerData),g=r.length>=4?Array.from(r[3].integerData):[];return{starts:i,ends:d,axes:g,cacheKey:`${g};${i};${d}`}},t=e=>{if(!e||e.length<3||e.length>5)throw new Error("Invalid input number.");if(e[1].type!=="int32"||e[1].dims.length!==1)throw new Error("Invalid input type.");if(e[2].type!=="int32"||e[2].dims.length!==1)throw new Error("Invalid input type.");if(e.length>=4&&(e[3].type!=="int32"||e[3].dims.length!==1))throw new Error("Invalid input type.");if(e.length>=5&&(e[4].type!=="int32"||e[4].dims.length!==1))throw new Error("Invalid input type.")}},5524:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.softmaxV13=n.parseSoftmaxAttributesV13=n.parseSoftmaxAttributes=n.softmax=void 0;const u=a(246),c=a(2517),p=a(5060),s=a(2039),h=a(3738),f={name:"SoftmaxComputeMax",inputNames:["A"],inputTypes:[s.TextureType.unpacked]},l={name:"SoftmaxComputeScale",inputNames:["A","Max"],inputTypes:[s.TextureType.unpacked,s.TextureType.unpacked]},o={name:"SoftMax",inputNames:["A","Max","Norm"],inputTypes:[s.TextureType.unpacked,s.TextureType.unpacked,s.TextureType.unpacked]};n.softmax=(g,m,b)=>{d(m);const _=m[0].dims.slice(),v=c.ShapeUtil.normalizeAxis(b.axis,_.length),w=c.ShapeUtil.sizeToDimension(_,v),S=c.ShapeUtil.sizeFromDimension(_,v);return t(g,m,b,w,S)},n.parseSoftmaxAttributes=g=>(0,u.createAttributeWithCacheKey)({axis:g.attributes.getInt("axis",1)}),n.parseSoftmaxAttributesV13=g=>(0,u.createAttributeWithCacheKey)({axis:g.attributes.getInt("axis",-1)}),n.softmaxV13=(g,m,b)=>{d(m);const _=m[0].dims.slice(),v=c.ShapeUtil.normalizeAxis(b.axis,_.length),w=_.length,S=v!==w-1,A=[];let O,x=[],I=[];S&&(x=Array.from({length:w}).map((F,H)=>H),x[v]=w-1,x[w-1]=v,x.map(F=>A.push(_[F])),O=(0,u.createAttributeWithCacheKey)({perm:x}),I=(0,h.transpose)(g,m,O));const N=S?c.ShapeUtil.sizeToDimension(A,w-1):c.ShapeUtil.sizeToDimension(_,w-1),B=S?c.ShapeUtil.sizeFromDimension(A,w-1):c.ShapeUtil.sizeFromDimension(_,w-1),L=t(g,S?I:m,b,N,B);return S?(0,h.transpose)(g,L,O):L};const t=(g,m,b,_,v)=>{const w=e(g,m[0],_,v,[_]),S=g.run(Object.assign(Object.assign({},f),{cacheHint:b.cacheKey,get:()=>w}),m),A=r(g,m[0],_,v,w.output.dims,[_]),O=g.run(Object.assign(Object.assign({},l),{cacheHint:b.cacheKey,get:()=>A}),[m[0],S]),x=i(g,m[0],_,v,w.output.dims,A.output.dims);return[g.run(Object.assign(Object.assign({},o),{cacheHint:b.cacheKey,get:()=>x}),[m[0],S,O])]},e=(g,m,b,_,v)=>{const[w,S]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),A=v.length;if(b<1||_<1)throw new Error("Logical row count N and feature count D must be greater than or equal to 1");if(v.length!==1)throw new Error("Dimensionality of the output should be 1");if(v[0]!==b)throw new Error("Shape of the output should be equal to logical row count");const O=(0,p.getGlsl)(g.session.backend.glContext.version),x=` - float process(int[${A}] indices) { - int logical_row_start_offset = indices[0] * ${_}; - - float max = getColorAsFloat(${O.texture2D}(A, offsetToCoords(logical_row_start_offset, ${w}, - ${S} ))); - for(int i=1; i<${_}; ++i) - { - float current = getColorAsFloat(${O.texture2D}(A, offsetToCoords(logical_row_start_offset + i, - ${w}, ${S}))); - if(current > max) - max = current; - } - - return max; - }`;return Object.assign(Object.assign({},f),{output:{dims:v,type:m.type,textureType:s.TextureType.unpacked},shaderSource:x})},r=(g,m,b,_,v,w)=>{const[S,A]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),O=w.length;if(b<1||_<1)throw new Error("Logical row count N and 
feature count D must be greater than or equal to 1");if(w.length!==1)throw new Error("Dimensionality of the output should be 1");if(w[0]!==b)throw new Error("Shape of the output should be equal to logical row count");if(v.length!==1)throw new Error("Dimensionality of the intermediate results should be 1");if(v[0]!==b)throw new Error("Shape of the intermediate results should be equal to logical row count");const x=` - float process(int[${O}] indices) { - int logical_row_start_offset = indices[0] * ${_}; - - float norm_factor = 0.0; - float max = _Max(indices); - for(int i=0; i<${_}; ++i) - { - norm_factor += exp(getColorAsFloat(${(0,p.getGlsl)(g.session.backend.glContext.version).texture2D}(A, offsetToCoords(logical_row_start_offset + i, - ${S}, ${A}))) - max); - } - - return norm_factor; - }`;return Object.assign(Object.assign({},l),{output:{dims:w,type:m.type,textureType:s.TextureType.unpacked},shaderSource:x})},i=(g,m,b,_,v,w)=>{const[S,A]=g.calculateTextureWidthAndHeight(m.dims,s.TextureType.unpacked),O=m.dims.length;if(b<1||_<1)throw new Error("Logical row count N and feature count D must be greater than or equal to 1");if(v.length!==1||w.length!==1)throw new Error("Dimensionality of the intermediate results should be 1");if(v[0]!==b||w[0]!==b)throw new Error("Shape of the intermediate results should be equal to logical row count");const x=` - float process(int[${O}] indices) { - - // get offset of current logical tensor index from the 2-D texture coordinates (TexCoords) - int offset = coordsToOffset(TexCoords, ${S}, ${A}); - - //determine the logical row for this index - int logical_row_index[1]; - logical_row_index[0] = offset / ${_}; - - float norm_factor = _Norm(logical_row_index); - - // avoid possible division by 0 - // if norm_facor is 0, all elements are zero - // if so, return 0 - if(norm_factor == 0.0) - return 0.0; - - return exp(_A(indices) - _Max(logical_row_index)) / norm_factor; - }`;return Object.assign(Object.assign({},o),{output:{dims:m.dims,type:m.type,textureType:s.TextureType.unpacked},shaderSource:x})},d=g=>{if(!g||g.length!==1)throw new Error("Softmax requires 1 input.");if(g[0].type!=="float32"&&g[0].type!=="float64")throw new Error("Invalid input type")}},5975:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseSplitAttributes=n.split=void 0;const u=a(246),c=a(2517),p=a(2039),s={name:"Split",inputNames:["A"],inputTypes:[p.TextureType.unpacked]};n.split=(o,t,e)=>{l(t);const r=c.ShapeUtil.normalizeAxis(e.axis,t[0].dims.length),i=h(o,t,r,e),d=[];for(let g=0;gf(o,t[0],e,r,g)}),t));return d},n.parseSplitAttributes=o=>{const t=o.attributes.getInt("axis",0),e=o.attributes.getInts("split",[]),r=o.outputs.length;return(0,u.createAttributeWithCacheKey)({axis:t,split:e,numOutputs:r})};const h=(o,t,e,r)=>{const[,i]=c.SplitUtil.splitShape(t[0].dims,e,r.split,r.numOutputs);return i.length},f=(o,t,e,r,i)=>{const[d,g]=c.SplitUtil.splitShape(t.dims,r,e.split,e.numOutputs),m=g[i],b=d[i],_=` - float process(int indices[${b.length}]) { - indices[${r}] += ${m}; - return _A(indices); - } - `;return Object.assign(Object.assign({},s),{cacheHint:`${e.cacheKey}:${i}`,output:{dims:b,type:t.type,textureType:p.TextureType.unpacked},shaderSource:_})},l=o=>{if(!o||o.length!==1)throw new Error("Split requires one input.");if(o[0].type!=="int8"&&o[0].type!=="uint8"&&o[0].type!=="int16"&&o[0].type!=="uint16"&&o[0].type!=="int32"&&o[0].type!=="uint32"&&o[0].type!=="float32"&&o[0].type!=="float64"&&o[0].type!=="bool")throw new Error("Invalid input 
type.")}},3933:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseSqueezeAttributes=n.squeezeV13=n.squeeze=void 0;const u=a(2517);n.squeeze=(s,h,f)=>{c(h);const l=u.ShapeUtil.squeezeShape(h[0].dims,f);return[s.reshapeUnpacked(h[0],l)]},n.squeezeV13=(s,h)=>(p(h),(0,n.squeeze)(s,[h[0]],Array.from(h[1].integerData))),n.parseSqueezeAttributes=s=>s.attributes.getInts("axes");const c=s=>{if(!s||s.length!==1)throw new Error("Squeeze requires 1 input.");if(s[0].type==="string")throw new Error("invalid input tensor types.")},p=s=>{if(!s||s.length!==2)throw new Error("Squeeze requires 2 inputs.");if(s[1].type!=="int32")throw new Error("Invalid input type.")}},6558:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.sum=void 0;const u=a(5060),c=a(2039);n.sum=(h,f)=>{s(f);const l={name:"Sum",inputNames:f.map((o,t)=>`X${t}`),inputTypes:new Array(f.length).fill(c.TextureType.unpacked)};return[h.run(Object.assign(Object.assign({},l),{get:()=>p(h,f,l)}),f)]};const p=(h,f,l)=>{const o=(0,u.getGlsl)(h.session.backend.glContext.version),t=f[0].dims.slice(),e=` - void main() { - vec4 result = ${f.map((r,i)=>`${o.texture2D}(X${i},TexCoords)`).join(" + ")}; - ${o.output} = result; - } - `;return Object.assign(Object.assign({},l),{output:{dims:t,type:f[0].type,textureType:c.TextureType.unpacked},hasMain:!0,shaderSource:e})},s=h=>{if(!h||h.length===0)throw new Error("Sum requires inputs.");const f=h[0].dims.length;for(let l=1;l{Object.defineProperty(n,"__esModule",{value:!0}),n.tile=void 0;const u=a(782),c=a(2039);n.tile=(h,f)=>{s(f);const l={name:"Tile",inputNames:["A"],inputTypes:[c.TextureType.unpacked]};return[h.run(Object.assign(Object.assign({},l),{get:()=>p(h,f,l)}),f)]};const p=(h,f,l)=>{const o=f[0].dims.slice(),t=new Array(o.length),e=[];for(let d=0;d{if(!h||h.length!==2)throw new Error("Tile requires 2 input.");if(h[1].dims.length!==1)throw new Error("The second input shape must 1 dimension.");if(h[1].dims[0]!==h[0].dims.length)throw new Error("Invalid input shape.");if(u.NUMBER_TYPES.indexOf(h[0].type)===-1)throw new Error("Invalid input type.");if(h[1].type!=="int32"&&h[1].type!=="int16")throw new Error("Invalid repeat type.")}},3738:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseTransposeAttributes=n.transpose=void 0;const u=a(246),c=a(2517),p=a(2039),s={name:"Transpose",inputNames:["A"],inputTypes:[p.TextureType.unpacked]};n.transpose=(e,r,i)=>(t(r),[e.run(Object.assign(Object.assign({},s),{cacheHint:i.cacheKey,get:()=>h(e,r[0],i.perm)}),r)]),n.parseTransposeAttributes=e=>(0,u.createAttributeWithCacheKey)({perm:e.attributes.getInts("perm",[])});const h=(e,r,i)=>{const d=r.dims;i=f(d,i);const g=l(d,i),m=d.length,b=` - ${o("perm",i,m)} - float process(int indices[${m}]) { - int a[${m}]; - perm(a, indices); - return _A(a); - }`;return Object.assign(Object.assign({},s),{output:{dims:g,type:r.type,textureType:p.TextureType.unpacked},shaderSource:b})},f=(e,r)=>(r&&r.length!==e.length&&(r=[...e.keys()].reverse()),r),l=(e,r)=>(r=f(e,r),c.ShapeUtil.sortBasedOnPerm(e,r)),o=(e,r,i)=>{const d=[];d.push(`void ${e}(out int a[${i}], int src[${i}]) {`);for(let g=0;g{if(!e||e.length!==1)throw new Error("Transpose requires 1 input.");if(e[0].type!=="float32"&&e[0].type!=="float64")throw new Error("input should be float tensor")}},8710:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.encodeAsUint8=void 0;const u=a(5060),c=a(2039);n.encodeAsUint8=(p,s)=>{const h=s.shape,f=(0,u.getGlsl)(p.session.backend.glContext.version),l=` - const float FLOAT_MAX = 
1.70141184e38; - const float FLOAT_MIN = 1.17549435e-38; - - bool isNaN(float val) { - return (val < 1.0 || 0.0 < val || val == 0.0) ? false : true; - } - - highp vec4 encodeAsUint8(highp float v) { - if (isNaN(v)) { - return vec4(255, 255, 255, 255); - } - - highp float av = abs(v); - - if(av < FLOAT_MIN) { - return vec4(0.0, 0.0, 0.0, 0.0); - } else if(v > FLOAT_MAX) { - return vec4(0.0, 0.0, 128.0, 127.0) / 255.0; - } else if(v < -FLOAT_MAX) { - return vec4(0.0, 0.0, 128.0, 255.0) / 255.0; - } - - highp vec4 c = vec4(0,0,0,0); - - highp float e = floor(log2(av)); - highp float m = exp2(fract(log2(av))) - 1.0; - - c[2] = floor(128.0 * m); - m -= c[2] / 128.0; - c[1] = floor(32768.0 * m); - m -= c[1] / 32768.0; - c[0] = floor(8388608.0 * m); - - highp float ebias = e + 127.0; - c[3] = floor(ebias / 2.0); - ebias -= c[3] * 2.0; - c[2] += floor(ebias) * 128.0; - - c[3] += 128.0 * step(0.0, -v); - - return c / 255.0; - } - - void main() { - float value = ${f.texture2D}(X,TexCoords).r; - ${f.output} = encodeAsUint8(value); - }`,o={name:"Uint8Encode",inputTypes:[c.TextureType.unpacked],inputNames:["X"],output:{dims:h,type:s.tensor.type,textureType:c.TextureType.downloadUint8AsFloat},shaderSource:l,hasMain:!0};return p.executeProgram(o,[s.tensor])}},4909:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.tanh=n.tan=n.sqrt=n.sin=n.sigmoid=n.relu=n.not=n.neg=n.log=n.parseLeakyReluAttributes=n.leakyRelu=n.identity=n.floor=n.exp=n.parseEluAttributes=n.elu=n.cos=n.ceil=n.clipV11=n.parseClipAttributes=n.clip=n.atan=n.asin=n.acos=n.abs=n.glslTanh=n.glslTan=n.glslSqrt=n.glslSigmoid=n.glslRelu=n.glslSin=n.glslNot=n.glslNeg=n.glslLog=n.glslLeakyRelu=n.glslIdentity=n.glslClip=n.glslFloor=n.glslExp=n.glslElu=n.glslCos=n.glslCeil=n.glslAtan=n.glslAsin=n.glslAcos=n.glslAbs=void 0;const u=a(246),c=a(2517),p=a(8520),s=a(5060),h=a(2039);function f(){return L("abs")}function l(){return L("acos")}function o(){return L("asin")}function t(){return L("atan")}function e(){return L("ceil")}function r(){return L("cos")}function i(D){const j="elu";return{body:` - const float alpha = float(${D}); - - float ${j}_(float a) { - return a >= 0.0 ? a: (exp(a) - 1.0) * alpha; - } - vec4 ${j}_(vec4 v) { - return vec4(${j}_(v.x), ${j}_(v.y), ${j}_(v.z), ${j}_(v.w)); - } - `,name:j,type:p.FunctionType.ValueBased}}function d(){return L("exp")}function g(){return L("floor")}function m(D,j){const Z="clip";return{body:` - const float min = float(${D}); - const float max = float(${j}); - - float ${Z}_(float a) { - return clamp(a, min, max); - } - vec4 ${Z}_(vec4 v) { - return clamp(v, min, max); - } - `,name:Z,type:p.FunctionType.ValueBased}}function b(){const D="indentity";return{body:` - float ${D}_(float a) { - return a; - } - vec4 ${D}_(vec4 v) { - return v; - } - `,name:D,type:p.FunctionType.ValueBased}}function _(D){const j="leakyRelu";return{body:` - const float alpha = float(${D}); - - float ${j}_(float a) { - return a < 0.0 ? a * alpha : a; - } - vec4 ${j}_(vec4 v) { - return vec4(${j}_(v.x), ${j}_(v.y), ${j}_(v.z), ${j}_(v.w)); - } - `,name:j,type:p.FunctionType.ValueBased}}function v(){return L("log")}function w(){const D="neg";return{body:` - float ${D}_(float a) { - return -a; - } - vec4 ${D}_(vec4 v) { - return -v; - } - `,name:D,type:p.FunctionType.ValueBased}}function S(){const D="not";return{body:` - float ${D}_(float a) { - return float( ! 
bool(a) ); - } - bool ${D}_(bool a) { - return !a; - } - vec4 ${D}_(vec4 v) { - return vec4(!bool(v.x), !bool(v.y), !bool(v.z), !bool(v.w)); - } - bvec4 ${D}_(bvec4 v) { - return bvec4(!v.x, !v.y, !v.z, !v.w); - } - `,name:D,type:p.FunctionType.ValueBased}}function A(){return L("sin")}function O(){const D="relu";return{body:` - float ${D}_(float a) { - return max( a, 0.0 ); - } - vec4 ${D}_(vec4 v) { - return max( v, 0.0 ); - } - `,name:D,type:p.FunctionType.ValueBased}}function x(){const D="sigmoid";return{body:` - float ${D}_(float a) { - return 1.0 / (1.0 + exp(-a)); - } - vec4 ${D}_(vec4 v) { - return 1.0 / (1.0 + exp(-v)); - } - `,name:D,type:p.FunctionType.ValueBased}}function I(){return L("sqrt")}function N(){return L("tan")}function B(){const D="tanh";return{body:` - float ${D}_(float a) { - a = clamp(a, -10., 10.); - a = exp(2.*a); - return (a - 1.) / (a + 1.); - } - vec4 ${D}_(vec4 v) { - v = clamp(v, -10., 10.); - v = exp(2.*v); - return (v - 1.) / (v + 1.); - } - `,name:D,type:p.FunctionType.ValueBased}}function L(D){return{body:` - float ${D}_(float a) { - return ${D}(a); - } - vec4 ${D}_(vec4 v) { - return ${D}(v); - } - `,name:D,type:p.FunctionType.ValueBased}}n.glslAbs=f,n.glslAcos=l,n.glslAsin=o,n.glslAtan=t,n.glslCeil=e,n.glslCos=r,n.glslElu=i,n.glslExp=d,n.glslFloor=g,n.glslClip=m,n.glslIdentity=b,n.glslLeakyRelu=_,n.glslLog=v,n.glslNeg=w,n.glslNot=S,n.glslSin=A,n.glslRelu=O,n.glslSigmoid=x,n.glslSqrt=I,n.glslTan=N,n.glslTanh=B;const F=(D,j,Z,X)=>{const J=D.session.pack?h.TextureType.packed:h.TextureType.unpacked,ee={name:Z.name,inputTypes:[J],inputNames:["A"],cacheHint:X};return Object.assign(Object.assign({},ee),{get:()=>((ue,Ae,ve,oe)=>{const _e=ue.session.pack?h.TextureType.packed:h.TextureType.unpacked,be=(0,s.getGlsl)(ue.session.backend.glContext.version);return Object.assign(Object.assign({},Ae),{output:{dims:ve.dims,type:ve.type,textureType:_e},shaderSource:` - ${oe.body} - void main() { - vec4 v = ${be.texture2D}(A, TexCoords); - v = ${oe.name}_(v); - ${be.output} = v; - } - `,hasMain:!0})})(D,ee,j,Z)})};n.abs=(D,j)=>[D.run(F(D,j[0],f()),j)],n.acos=(D,j)=>[D.run(F(D,j[0],l()),j)],n.asin=(D,j)=>[D.run(F(D,j[0],o()),j)],n.atan=(D,j)=>[D.run(F(D,j[0],t()),j)],n.clip=(D,j,Z)=>[D.run(F(D,j[0],m(Z.min,Z.max),Z.cacheKey),j)],n.parseClipAttributes=D=>(0,u.createAttributeWithCacheKey)({min:D.attributes.getFloat("min",c.MIN_CLIP),max:D.attributes.getFloat("max",c.MAX_CLIP)}),n.clipV11=(D,j)=>{const Z=H(D,j);return(0,n.clip)(D,[j[0]],Z)};const H=(D,j)=>{if(j.length>=3&&(!D.session.isInitializer(j[1].dataId)||!D.session.isInitializer(j[2].dataId)))throw new Error("dynamic clip attributes are not allowed");const 
Z=j.length>=3?j[1].numberData[0]:c.MIN_CLIP,X=j.length>=3?j[2].numberData[0]:c.MAX_CLIP;return(0,u.createAttributeWithCacheKey)({min:Z,max:X})};n.ceil=(D,j)=>[D.run(F(D,j[0],e()),j)],n.cos=(D,j)=>[D.run(F(D,j[0],r()),j)],n.elu=(D,j,Z)=>[D.run(F(D,j[0],i(Z.alpha),Z.cacheKey),j)],n.parseEluAttributes=D=>(0,u.createAttributeWithCacheKey)({alpha:D.attributes.getFloat("alpha",1)}),n.exp=(D,j)=>[D.run(F(D,j[0],d()),j)],n.floor=(D,j)=>[D.run(F(D,j[0],g()),j)],n.identity=(D,j)=>[D.run(F(D,j[0],b()),j)],n.leakyRelu=(D,j,Z)=>[D.run(F(D,j[0],_(Z.alpha),Z.cacheKey),j)],n.parseLeakyReluAttributes=D=>(0,u.createAttributeWithCacheKey)({alpha:D.attributes.getFloat("alpha",.01)}),n.log=(D,j)=>[D.run(F(D,j[0],v()),j)],n.neg=(D,j)=>[D.run(F(D,j[0],w()),j)],n.not=(D,j)=>[D.run(F(D,j[0],S()),j)],n.relu=(D,j)=>[D.run(F(D,j[0],O()),j)],n.sigmoid=(D,j)=>[D.run(F(D,j[0],x()),j)],n.sin=(D,j)=>[D.run(F(D,j[0],A()),j)],n.sqrt=(D,j)=>[D.run(F(D,j[0],I()),j)],n.tan=(D,j)=>[D.run(F(D,j[0],N()),j)],n.tanh=(D,j)=>[D.run(F(D,j[0],B()),j)]},5611:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createUnpackProgramInfoLoader=n.createUnpackProgramInfo=void 0;const u=a(5060),c=a(2039),p=a(9390),s=a(2827),h={name:"unpack",inputNames:["A"],inputTypes:[c.TextureType.packed]};n.createUnpackProgramInfo=(f,l)=>{const o=l.dims.length,t=(0,s.getChannels)("rc",o),e=t.slice(-2),r=(0,p.getCoordsDataType)(o),i=(0,s.unpackFromChannel)(),d=l.dims.length===0?"":function(b,_){if(b===1)return"rc";let v="";for(let w=0;wObject.assign(Object.assign({},h),{get:()=>(0,n.createUnpackProgramInfo)(f,l)})},8428:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.parseUnsqueezeAttributes=n.unsqueezeV13=n.unsqueeze=void 0;const u=a(2517);n.unsqueeze=(s,h,f)=>{c(h);const l=u.ShapeUtil.unsqueezeShape(h[0].dims,f);return[s.reshapeUnpacked(h[0],l)]},n.unsqueezeV13=(s,h)=>(p(h),(0,n.unsqueeze)(s,[h[0]],Array.from(h[1].integerData))),n.parseUnsqueezeAttributes=s=>s.attributes.getInts("axes");const c=s=>{if(!s||s.length!==1)throw new Error("Unsqueeze requires 1 input.");if(s[0].type==="string")throw new Error("invalid input tensor types.")},p=s=>{if(!s||s.length!==2)throw new Error("Unsqueeze requires 2 inputs.");if(s[1].type!=="int32")throw new Error("Invalid input type.")}},9793:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.scalesValidation=n.validateInputs=n.parseUpsampleAttributes=n.parseUpsampleAttributesV9=n.parseUpsampleAttributesV7=n.upsample=void 0;const u=a(246),c=a(5060),p=a(2039),s={name:"Upsample",inputNames:["X"],inputTypes:[p.TextureType.unpacked]};n.upsample=(f,l,o)=>((0,n.validateInputs)(l,o),[f.run(Object.assign(Object.assign({},s),{cacheHint:o.cacheKey,get:()=>h(f,l,o)}),l)]),n.parseUpsampleAttributesV7=f=>(0,n.parseUpsampleAttributes)(f,7),n.parseUpsampleAttributesV9=f=>(0,n.parseUpsampleAttributes)(f,9),n.parseUpsampleAttributes=(f,l)=>{const o=l>=10,t=f.attributes.getString("mode","nearest");if(t!=="nearest"&&t!=="linear"&&(l<11||t!=="cubic"))throw new Error(`unrecognized mode: ${t}`);let e=[];l<9&&(e=f.attributes.getFloats("scales"),(0,n.scalesValidation)(e,t,o));const r=f.attributes.getFloat("extrapolation_value",0),i=l>10?f.attributes.getString("coordinate_transformation_mode","half_pixel"):"asymmetric";if(["asymmetric","pytorch_half_pixel","tf_half_pixel_for_nn","align_corners","tf_crop_and_resize","half_pixel"].indexOf(i)===-1)throw new Error(`coordinate_transform_mode '${i}' is not supported`);const 
d=i==="tf_crop_and_resize",g=d,m=t==="nearest"&&l>=11?f.attributes.getString("nearest_mode","round_prefer_floor"):"";if(["round_prefer_floor","round_prefer_ceil","floor","ceil",""].indexOf(m)===-1)throw new Error(`nearest_mode '${m}' is not supported`);const b=f.attributes.getFloat("cubic_coeff_a",-.75),_=f.attributes.getInt("exclude_outside",0)!==0;if(_&&t!=="cubic")throw new Error("exclude_outside can be set to 1 only when mode is CUBIC.");const v=l<11||t==="nearest"&&i==="asymmetric"&&m==="floor";let w=0,S=0,A=0;return l>10?f.inputs.length>2?(w=1,S=2,A=3):(S=1,A=2):l===9&&(S=1),(0,u.createAttributeWithCacheKey)({opset:l,isResize:o,mode:t,scales:e,extrapolationValue:r,coordinateTransformMode:i,useExtrapolation:g,needRoiInput:d,nearestMode:m,cubicCoefficientA:b,excludeOutside:_,useNearest2xOptimization:v,roiInputIdx:w,scalesInputIdx:S,sizesInputIdx:A})};const h=(f,l,o)=>{const t=(0,c.getGlsl)(f.session.backend.glContext.version),[e,r]=f.calculateTextureWidthAndHeight(l[0].dims,p.TextureType.unpacked),i=l[0].dims.map((A,O)=>Math.floor(A*o.scales[O])),[d,g]=f.calculateTextureWidthAndHeight(i,p.TextureType.unpacked),m=i.length,b=new Array(m),_=new Array(m);let v=` - int output_pitches[${m}]; - int input_pitches[${m}]; - `;for(let A=m-1;A>=0;A--)b[A]=A===m-1?1:b[A+1]*i[A+1],_[A]=A===m-1?1:_[A+1]*l[0].dims[A+1],v+=` - output_pitches[${A}] = ${b[A]}; - input_pitches[${A}] = ${_[A]}; - `;const w=` - float getInputFloat(int index) { - vec2 coords = offsetToCoords(index, ${e}, ${r}); - float value = getColorAsFloat(${t.texture2D}(X, coords)); - return value; - } - `,S=o.mode==="nearest"?` - ${w} - float process(int indices[${m}]) { - int input_index = 0; - int output_index = coordsToOffset(TexCoords, ${d}, ${g}); - - ${v} - - int d, m; - for (int dim = 0; dim < ${m}; ++dim) { - d = output_index / output_pitches[dim]; - m = output_index - d * output_pitches[dim]; - output_index = m; - - if (scales[dim] != 1 && d > 0) { - int d2 = d / scales[dim]; - m = d - d2 * scales[dim]; - d = d2; - } - input_index += input_pitches[dim] * d; - } - - return getInputFloat(input_index); - }`:m===4?` - ${w} - float process(int indices[4]) { - int input_index = 0; - int output_index = coordsToOffset(TexCoords, ${d}, ${g}); - - ${v} - - int m; - int index_of_dim0, index_of_dim1, index_of_dim2, index_of_dim3; - index_of_dim0 = output_index / output_pitches[0]; - m = output_index - index_of_dim0 * output_pitches[0]; - index_of_dim1 = m / output_pitches[1]; - m = m - index_of_dim1 * output_pitches[1]; - index_of_dim2 = m / output_pitches[2]; - m = m - index_of_dim2 * output_pitches[2]; - index_of_dim3 = m; - - int index_of_input_dim2, index_of_input_dim3, x_offset, y_offset; - index_of_input_dim2 = index_of_dim2 / scales[2]; - y_offset = index_of_dim2 - index_of_input_dim2 * scales[2]; - index_of_input_dim3 = index_of_dim3 / scales[3]; - x_offset = index_of_dim3 - index_of_input_dim3 * scales[3]; - - input_index = index_of_dim0 * input_pitches[0] + - index_of_dim1 * input_pitches[1] + - index_of_input_dim2 * input_pitches[2] + - index_of_input_dim3; - - float x00 = getInputFloat(input_index); - float x10, x01, x11; - - bool end_of_dim2 = false; - if (index_of_input_dim2 == (${l[0].dims[2]} - 1)) { - // It's the end in dimension 2 - x01 = x00; - end_of_dim2 = true; - } else { - x01 = getInputFloat(input_index + input_pitches[2]); - } - - if (index_of_input_dim3 == (input_pitches[2] - 1)) { - // It's the end in dimension 3 - x10 = x00; - x11 = x01; - } - else { - x10 = getInputFloat(input_index + 1); - x11 = end_of_dim2 ? 
x10 : getInputFloat(input_index + input_pitches[2] + 1); - } - - float y0 = x00 + float(y_offset) * (x01 - x00) / float(scales[2]); - float y1 = x10 + float(y_offset) * (x11 - x10) / float(scales[2]); - return y0 + float(x_offset) * (y1 - y0) / float(scales[3]); - }`:` - ${w} - float process(int indices[2]) { - int input_index = 0; - int output_index = coordsToOffset(TexCoords, ${d}, ${g}); - - ${v} - - int m; - int index_of_dim0, index_of_dim1; - index_of_dim0 = output_index / output_pitches[0]; - m = output_index - index_of_dim0 * output_pitches[0]; - index_of_dim1 = m; - - int index_of_input_dim0, index_of_input_dim1, x_offset, y_offset; - index_of_input_dim0 = index_of_dim0 / scales[0]; - y_offset = index_of_dim0 - index_of_input_dim0 * scales[0]; - index_of_input_dim1 = index_of_dim1 / scales[1]; - x_offset = index_of_dim1 - index_of_input_dim1 * scales[1]; - - input_index = index_of_input_dim0 * input_pitches[0] + index_of_input_dim1; - - float x00 = getInputFloat(input_index); - float x10, x01, x11; - - bool end_of_dim0 = false; - if (index_of_input_dim0 == (${l[0].dims[0]} - 1)) { - // It's the end in dimension 0 - x01 = x00; - end_of_dim0 = true; - } else { - x01 = getInputFloat(input_index + input_pitches[0]); - } - - if (index_of_input_dim1 == (input_pitches[0] - 1)) { - // It's the end in dimension 1 - x10 = x00; - x11 = x01; - } - else { - x10 = getInputFloat(input_index + 1); - x11 = end_of_dim0 ? x10 : getInputFloat(input_index + input_pitches[0] + 1); - } - - float y0 = x00 + float(y_offset) * (x01 - x00) / float(scales[0]); - float y1 = x10 + float(y_offset) * (x11 - x10) / float(scales[0]); - return y0 + float(x_offset) * (y1 - y0) / float(scales[1]); - }`;return Object.assign(Object.assign({},s),{output:{dims:i,type:l[0].type,textureType:p.TextureType.unpacked},shaderSource:S,variables:[{name:"scales",type:"int",arrayLength:o.scales.length,data:o.scales.map(A=>Math.ceil(A))}]})};n.validateInputs=(f,l)=>{if(!f||l.opset<9&&f.length!==1||l.opset>=9&&l.opset<11&&f.length!==2||l.opset>=11&&f.length<2)throw new Error("invalid inputs.");if(l.scales.length>0&&f[0].dims.length!==l.scales.length)throw new Error("Invalid input shape.");if(f[0].type==="string")throw new Error("Invalid input tensor types.")},n.scalesValidation=(f,l,o)=>{if(o){for(const t of f)if(t<=0)throw new Error("Scale value should be greater than 0.")}else for(const t of f)if(t<1)throw new Error("Scale value should be greater than or equal to 1.");if(!(l!=="linear"&&l!=="cubic"||f.length===2||f.length===4&&f[0]===1&&f[1]===1))throw new Error(`'Linear' mode and 'Cubic' mode only support 2-D inputs ('Bilinear', 'Bicubic') or 4-D inputs with the corresponding outermost 2 scale values being 1 in the ${o?"Resize":"Upsample"} opeartor.`)}},1958:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.ProgramManager=void 0;const u=a(1670),c=a(6231),p=a(8879),s=a(5060);n.ProgramManager=class{constructor(h,f,l){this.profiler=h,this.glContext=f,this.textureLayoutStrategy=l,this.repo=new Map,this.attributesBound=!1}getArtifact(h){return this.repo.get(h)}setArtifact(h,f){this.repo.set(h,f)}run(h,f,l){var o;this.profiler.event("op",`ProgramManager.run ${(o=h.programInfo.name)!==null&&o!==void 0?o:"unknown kernel"}`,()=>{var t;const e=this.glContext.gl,r=h.program;e.useProgram(r);try{this.bindOutput(l),this.attributesBound||this.bindAttributes(h.attribLocations),this.bindUniforms(h.uniformLocations,(t=h.programInfo.variables)!==null&&t!==void 0?t:[],f)}catch(i){throw 
c.Logger.error("ProgramManager",h.programInfo.shaderSource),i}this.profiler.event("backend","GlContext.draw()",()=>{this.glContext.draw()})},this.glContext)}dispose(){this.vertexShader&&this.glContext.deleteShader(this.vertexShader),this.repo.forEach(h=>this.glContext.deleteProgram(h.program))}build(h,f,l){return this.profiler.event("backend","ProgramManager.build",()=>{const o=new p.GlslPreprocessor(this.glContext,h,f,l),t=o.preprocess(),e=this.compile(t);return{programInfo:h,program:e,uniformLocations:this.getUniformLocations(e,o.context.programInfo.inputNames,o.context.programInfo.variables),attribLocations:this.getAttribLocations(e)}})}compile(h){if(!this.vertexShader){c.Logger.verbose("ProrgramManager","Compiling and caching Vertex shader for the first time");const o=(0,s.getVertexShaderSource)(this.glContext.version);this.vertexShader=this.glContext.compileShader(o,this.glContext.gl.VERTEX_SHADER)}u.env.debug&&c.Logger.verbose("ProrgramManager",`FragShader: -${h} -`);const f=this.glContext.compileShader(h,this.glContext.gl.FRAGMENT_SHADER),l=this.glContext.createProgram(this.vertexShader,f);return this.glContext.deleteShader(f),l}bindOutput(h){const f=h.width,l=h.height;c.Logger.verbose("ProrgramManager",`Binding output texture to Framebuffer: w/h=${f}/${l}, shape=${h.shape}, type=${h.tensor.type}`),this.glContext.attachFramebuffer(h.texture,f,l)}bindAttributes(h){const f=h.position,l=h.textureCoord;this.glContext.setVertexAttributes(f,l),this.attributesBound=!0}bindUniforms(h,f,l){var o;const t=this.glContext.gl;let e=0;for(const{name:r,type:i,location:d,arrayLength:g}of h){const m=(o=f.find(b=>b.name===r))===null||o===void 0?void 0:o.data;if(i!=="sampler2D"&&!m)throw new Error(`variable '${r}' does not have data defined in program info`);switch(i){case"sampler2D":this.bindTexture(l[e],d,e),e++;break;case"float":g?t.uniform1fv(d,m):t.uniform1f(d,m);break;case"int":g?t.uniform1iv(d,m):t.uniform1i(d,m);break;default:throw new Error(`Uniform not implemented: ${i}`)}}}bindTexture(h,f,l){this.glContext.bindTextureToUniform(h.texture,l,f)}getAttribLocations(h){return{position:this.getAttribLocation(h,"position"),textureCoord:this.getAttribLocation(h,"textureCoord")}}getUniformLocations(h,f,l){const o=[];if(f)for(const t of f)o.push({name:t,type:"sampler2D",location:this.getUniformLocation(h,t)});if(l)for(const t of l)o.push(Object.assign(Object.assign({},t),{location:this.getUniformLocation(h,t.name)}));return o}getUniformLocation(h,f){const l=this.glContext.gl.getUniformLocation(h,f);if(l===null)throw new Error(`Uniform ${f} not found.`);return l}getAttribLocation(h,f){return this.glContext.gl.getAttribLocation(h,f)}}},6416:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLSessionHandler=void 0;const u=a(6231),c=a(1047),p=a(8316),s=a(1640),h=a(1958),f=a(7859),l=a(5702);n.WebGLSessionHandler=class{constructor(o,t){this.backend=o,this.context=t,this.layoutStrategy=new f.PreferLogicalStrategy(o.glContext.maxTextureSize),this.programManager=new h.ProgramManager(this.context.profiler,o.glContext,this.layoutStrategy),this.textureManager=new l.TextureManager(o.glContext,this.layoutStrategy,this.context.profiler,{reuseTextures:o.textureCacheMode==="full"}),this.packedTextureDataCache=new Map,this.unpackedTextureDataCache=new Map,this.pack=o.pack,this.pack2unpackMap=new Map,this.unpack2packMap=new Map}createInferenceHandler(){return new p.WebGLInferenceHandler(this)}onGraphInitialized(o){const 
t=o.getValues().filter(e=>e.from===-1&&e.tensor).map(e=>e.tensor.dataId);this.initializers=new Set(t)}isInitializer(o){return!!this.initializers&&this.initializers.has(o)}addInitializer(o){this.initializers.add(o)}getTextureData(o,t){return t?this.packedTextureDataCache.get(o):this.unpackedTextureDataCache.get(o)}setTextureData(o,t,e=!1){u.Logger.verbose("WebGLSessionHandler","Storing Texture data in cache"),e?this.packedTextureDataCache.set(o,t):this.unpackedTextureDataCache.set(o,t)}dispose(){this.programManager.dispose(),this.textureManager.clearActiveTextures(),this.packedTextureDataCache.forEach(o=>this.textureManager.releaseTexture(o,!0)),this.packedTextureDataCache=new Map,this.unpackedTextureDataCache.forEach(o=>this.textureManager.releaseTexture(o,!0)),this.unpackedTextureDataCache=new Map}resolve(o,t,e){const r=(0,c.resolveOperator)(o,t,s.WEBGL_OP_RESOLVE_RULES);return{impl:r.opImpl,context:r.opInit?r.opInit(o,e):o}}}},7769:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Uint8DataEncoder=n.RGBAFloatDataEncoder=n.RedFloat32DataEncoder=void 0;const u=a(6231);n.RedFloat32DataEncoder=class{constructor(c,p=1){if(p===1)this.internalFormat=c.R32F,this.format=c.RED,this.textureType=c.FLOAT,this.channelSize=p;else{if(p!==4)throw new Error(`Invalid number of channels: ${p}`);this.internalFormat=c.RGBA32F,this.format=c.RGBA,this.textureType=c.FLOAT,this.channelSize=p}}encode(c,p){let s,h;return c.constructor!==Float32Array&&(u.Logger.warning("Encoder","data was not of type Float32; creating new Float32Array"),h=new Float32Array(c)),p*this.channelSize>c.length?(u.Logger.warning("Encoder","Source data too small. Allocating larger array"),h=c,s=this.allocate(p*this.channelSize),h.forEach((f,l)=>s[l]=f)):(h=c,s=h),s}allocate(c){return new Float32Array(4*c)}decode(c,p){return this.channelSize===1?c.filter((s,h)=>h%4==0).subarray(0,p):c.subarray(0,p)}},n.RGBAFloatDataEncoder=class{constructor(c,p=1,s){if(p!==1&&p!==4)throw new Error(`Invalid number of channels: ${p}`);this.internalFormat=c.RGBA,this.format=c.RGBA,this.channelSize=p,this.textureType=s||c.FLOAT}encode(c,p){let s=c;return this.channelSize===1&&(u.Logger.verbose("Encoder","Exploding into a larger array"),s=this.allocate(p),c.forEach((h,f)=>s[4*f]=h)),s}allocate(c){return new Float32Array(4*c)}decode(c,p){return this.channelSize===1?c.filter((s,h)=>h%4==0).subarray(0,p):c.subarray(0,p)}},n.Uint8DataEncoder=class{constructor(c,p=1){if(this.channelSize=4,p===1)this.internalFormat=c.ALPHA,this.format=c.ALPHA,this.textureType=c.UNSIGNED_BYTE,this.channelSize=p;else{if(p!==4)throw new Error(`Invalid number of channels: ${p}`);this.internalFormat=c.RGBA,this.format=c.RGBA,this.textureType=c.UNSIGNED_BYTE,this.channelSize=p}}encode(c,p){return new Uint8Array(c.buffer,c.byteOffset,c.byteLength)}allocate(c){return new Uint8Array(c*this.channelSize)}decode(c,p){if(c instanceof Uint8Array)return c.subarray(0,p);throw new Error(`Invalid array type: ${c.constructor}`)}}},7859:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.getBatchDim=n.sizeToSquarishShape=n.getRowsCols=n.sizeFromShape=n.isInt=n.parseAxisParam=n.squeezeShape=n.PreferLogicalStrategy=n.AlwaysKeepOriginalSizeStrategy=void 0;const u=a(6231),c=a(2517);function p(o,t){const e=[],r=[],i=t!=null&&Array.isArray(t)&&t.length===0,d=t==null||i?null:s(t,o).sort();let g=0;for(let m=0;mm)&&o[m]===1&&(e.push(o[m]),r.push(m)),d[g]<=m&&g++}o[m]!==1&&(e.push(o[m]),r.push(m))}return{newShape:e,keptDims:r}}function s(o,t){const e=t.length;return 
o=o==null?t.map((r,i)=>i):[].concat(o),(0,c.assert)(o.every(r=>r>=-e&&r`All values in axis param must be in range [-${e}, ${e}) but got axis ${o}`),(0,c.assert)(o.every(h),()=>`All values in axis param must be integers but got axis ${o}`),o.map(r=>r<0?e+r:r)}function h(o){return o%1==0}function f(o){if(o.length===0)return 1;let t=o[0];for(let e=1;e=o.length?1:o.slice(t.breakAxis).reduce((m,b)=>m*b),g=t.breakAxis<=0?1:o.slice(0,t.breakAxis).reduce((m,b)=>m*b);if(!(d>e||g>e))return[d,g];u.Logger.verbose("TextureLayout",`Given width/height preferences were unattainable: shape:${o}, breakAxis:${t.breakAxis}`)}const r=o.reduce((d,g)=>d*g);let i=Math.floor(Math.sqrt(r));for(;i=e||r%i!=0)throw new Error(`The given dimensions are outside this GPU's boundaries: ${o}`);return[i,r/i]}},n.PreferLogicalStrategy=class{constructor(o){this.maxTextureSize=o}computeTextureWH(o,t){const e=this.computeTexture(o,t);return t&&t.isPacked&&(e[0]/=2,e[1]/=2),t&&t.reverseWH?[e[1],e[0]]:e}computeTexture(o,t){const e=t&&t.isPacked;if(o.length===0)return e?[2,2]:[1,1];let r=this.maxTextureSize;if(t&&t.breakAxis!==void 0){const g=t.breakAxis>=o.length?1:o.slice(t.breakAxis).reduce((b,_)=>b*_),m=t.breakAxis<=0?1:o.slice(0,t.breakAxis).reduce((b,_)=>b*_);if(!(g>r||m>r))return[g,m];u.Logger.verbose("TextureLayout",`Given width/height preferences were unattainable: shape:${o}, breakAxis:${t.breakAxis}`)}let i=o.slice(0);e&&(r*=2,i=i.map((g,m)=>m>=i.length-2?i[m]%2==0?i[m]:i[m]+1:i[m]),i.length===1&&(i=[2,i[0]])),i.length!==2&&(i=p(i).newShape);const d=f(i);return i.length<=1&&d<=r?[1,d]:i.length===2&&i[0]<=r&&i[1]<=r?i:i.length===3&&i[0]*i[1]<=r&&i[2]<=r?[i[0]*i[1],i[2]]:i.length===3&&i[0]<=r&&i[1]*i[2]<=r?[i[0],i[1]*i[2]]:i.length===4&&i[0]*i[1]*i[2]<=r&&i[3]<=r?[i[0]*i[1]*i[2],i[3]]:i.length===4&&i[0]<=r&&i[1]*i[2]*i[3]<=r?[i[0],i[1]*i[2]*i[3]]:e?l(d/4).map(g=>2*g):l(d)}},n.squeezeShape=p,n.parseAxisParam=s,n.isInt=h,n.sizeFromShape=f,n.getRowsCols=function(o){if(o.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[o.length>1?o[o.length-2]:1,o[o.length-1]]},n.sizeToSquarishShape=l,n.getBatchDim=function(o,t=2){return f(o.slice(0,o.length-t))}},4057:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createTextureLayoutFromShape=n.calculateTextureWidthAndHeight=n.createTextureLayoutFromTextureType=void 0;const u=a(2517),c=a(2039);n.createTextureLayoutFromTextureType=(p,s,h)=>{const f=h===c.TextureType.unpacked||h===c.TextureType.unpackedReversed?1:4,l=h===c.TextureType.packed,o=h===c.TextureType.unpackedReversed||h===c.TextureType.packed,t=h===c.TextureType.packedLastDimension?s.length-1:void 0,e=h===c.TextureType.packedLastDimension?s.map((r,i)=>i===s.length-1?4*r:r):void 0;return(0,n.createTextureLayoutFromShape)(p,s,f,e,{isPacked:l,reverseWH:o,breakAxis:t})},n.calculateTextureWidthAndHeight=(p,s,h)=>{const f=(0,n.createTextureLayoutFromTextureType)(p,s,h);return[f.width,f.height]},n.createTextureLayoutFromShape=(p,s,h=1,f,l)=>{const o=!(!l||!l.isPacked),[t,e]=p.computeTextureWH(o&&f||s,l),r=s.length;let i=s.slice(0);if(r===0&&(i=[1]),h===1)f=s;else if(o){if(h!==4)throw new Error("a packed texture must be 4-channel");f=s,r>0&&(i[r-1]=Math.ceil(i[r-1]/2)),r>1&&(i[r-2]=Math.ceil(i[r-2]/2))}else if(!f)throw new Error("Unpacked shape is needed when using channels > 
1");return{width:t,height:e,channels:h,isPacked:o,shape:i,strides:u.ShapeUtil.computeStrides(i),unpackedShape:f,reversedWH:l&&l.reverseWH}}},5702:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.TextureManager=void 0;const u=a(6231);n.TextureManager=class{constructor(c,p,s,h){this.glContext=c,this.layoutStrategy=p,this.profiler=s,this.config=h,this.pendingRead=new Map,h.reuseTextures&&(this.inUseTextures=new Map,this.idleTextures=new Map,this.textureLookup=new Map)}createTextureFromLayout(c,p,s,h){const f=this.toEncoderType(c),l=this.glContext.getEncoder(f,p.channels||1,h);if(p.isPacked&&h===1)throw new Error("not implemented");const o=p.width,t=p.height;let e,r;if(this.config.reuseTextures){e=`${o}x${t}_${l.format}_${l.internalFormat}_${l.textureType}`,r=this.inUseTextures.get(e),r||(r=[],this.inUseTextures.set(e,r));const d=this.idleTextures.get(e);if(d&&d.length>0){const g=d.pop();return r.push(g),h===1&&this.glContext.updateTexture(g,o,t,l,this.toTextureData(c,s)),g}}u.Logger.verbose("TextureManager",`Creating new texture of size ${p.width}x${p.height}`);const i=this.glContext.allocateTexture(o,t,l,this.toTextureData(c,s));return this.config.reuseTextures&&(r.push(i),this.textureLookup.set(i,e)),i}readTexture(c,p,s){return s||(s=1),this.profiler.event("backend","TextureManager.readTexture",()=>{const h=c.shape.reduce((l,o)=>l*o)*s,f=this.glContext.readTexture(c.texture,c.width,c.height,h,this.toEncoderType(p),s);return this.toTensorData(p,f)})}async readTextureAsync(c,p,s){const h=c.tensor.dataId;if(s||(s=1),this.pendingRead.has(h)){const f=this.pendingRead.get(h);return new Promise(l=>f==null?void 0:f.push(l))}return this.profiler.event("backend","TextureManager.readTextureAsync",async()=>{this.pendingRead.set(h,[]);const f=c.shape.reduce((e,r)=>e*r)*s;await this.glContext.createAndWaitForFence();const l=this.glContext.readTexture(c.texture,c.width,c.height,f,this.toEncoderType(p),s),o=this.toTensorData(p,l),t=this.pendingRead.get(h);return this.pendingRead.delete(h),t==null||t.forEach(e=>e(o)),o})}readUint8TextureAsFloat(c){return this.profiler.event("backend","TextureManager.readUint8TextureAsFloat",()=>{const p=c.shape.reduce((h,f)=>h*f),s=this.glContext.readTexture(c.texture,c.width,c.height,4*p,"byte",4);return new Float32Array(s.buffer,s.byteOffset,p)})}releaseTexture(c,p){let s;if(this.config.reuseTextures&&(s=this.textureLookup.get(c.texture),s)){p&&this.textureLookup.delete(s);const h=this.inUseTextures.get(s);if(h){const f=h.indexOf(c.texture);if(f!==-1){h.splice(f,1);let l=this.idleTextures.get(s);l||(l=[],this.idleTextures.set(s,l)),l.push(c.texture)}}}s&&!p||(u.Logger.verbose("TextureManager",`Deleting texture of size ${c.width}x${c.height}`),this.glContext.deleteTexture(c.texture))}toTensorData(c,p){switch(c){case"int16":return p instanceof Int16Array?p:Int16Array.from(p);case"int32":return p instanceof Int32Array?p:Int32Array.from(p);case"int8":return p instanceof Int8Array?p:Int8Array.from(p);case"uint16":return p instanceof Uint16Array?p:Uint16Array.from(p);case"uint32":return p instanceof Uint32Array?p:Uint32Array.from(p);case"uint8":case"bool":return p instanceof Uint8Array?p:Uint8Array.from(p);case"float32":return p instanceof Float32Array?p:Float32Array.from(p);case"float64":return p instanceof Float64Array?p:Float64Array.from(p);default:throw new Error(`TensorData type ${c} is not supported`)}}toTextureData(c,p){if(p)return p instanceof Float32Array?p:new 
Float32Array(p)}toEncoderType(c){return"float"}clearActiveTextures(){this.glContext.clearActiveTextures()}}},2039:(y,n)=>{var a;Object.defineProperty(n,"__esModule",{value:!0}),n.TextureType=void 0,(a=n.TextureType||(n.TextureType={}))[a.unpacked=0]="unpacked",a[a.unpackedReversed=1]="unpackedReversed",a[a.packed=2]="packed",a[a.downloadUint8AsFloat=3]="downloadUint8AsFloat",a[a.packedLastDimension=4]="packedLastDimension"},9390:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.getGlChannels=n.getCoordsDataType=n.getSqueezedParams=n.squeezeInputShape=n.generateShaderFuncNameFromInputSamplerNameAtOutCoords=n.generateShaderFuncNameFromInputSamplerName=n.repeatedTry=n.getPackedShape=void 0;const u=a(2517);n.getPackedShape=function(c){const p=c.length;return c.slice(0,p-1).concat(c[p-1]/4)},n.repeatedTry=async function(c,p=h=>0,s){return new Promise((h,f)=>{let l=0;const o=()=>{if(c())return void h();l++;const t=p(l);s!=null&&l>=s?f():setTimeout(o,t)};o()})},n.generateShaderFuncNameFromInputSamplerName=function(c){return(0,u.assert)(c!==void 0&&c.length!==0,()=>"empty string found for sampler name"),"get"+c.charAt(0).toUpperCase()+c.slice(1)},n.generateShaderFuncNameFromInputSamplerNameAtOutCoords=function(c){return(0,u.assert)(c!==void 0&&c.length!==0,()=>"empty string found for sampler name"),"get"+c.charAt(0).toUpperCase()+c.slice(1)+"AtOutCoords"},n.squeezeInputShape=function(c,p){let s=JSON.parse(JSON.stringify(c));return s=p,s},n.getSqueezedParams=function(c,p){return p.map(s=>c[s]).join(", ")},n.getCoordsDataType=function(c){if(c<=1)return"int";if(c===2)return"ivec2";if(c===3)return"ivec3";if(c===4)return"ivec4";if(c===5)return"ivec5";if(c===6)return"ivec6";throw Error(`GPU for rank ${c} is not yet supported`)},n.getGlChannels=function(c=6){return["x","y","z","w","u","v"].slice(0,c)}},7305:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.createNewWebGLContext=n.createWebGLContext=void 0;const u=a(6231),c=a(1713),p={};function s(h){const f=function(){if(typeof document>"u"){if(typeof OffscreenCanvas>"u")throw new TypeError("failed to create canvas: OffscreenCanvas is not supported");return new OffscreenCanvas(1,1)}const t=document.createElement("canvas");return t.width=1,t.height=1,t}();let l;const o={alpha:!1,depth:!1,antialias:!1,stencil:!1,preserveDrawingBuffer:!1,premultipliedAlpha:!1,failIfMajorPerformanceCaveat:!1};if((!h||h==="webgl2")&&(l=f.getContext("webgl2",o),l))try{return new c.WebGLContext(l,2)}catch(t){u.Logger.warning("GlContextFactory",`failed to create WebGLContext using contextId 'webgl2'. Error: ${t}`)}if((!h||h==="webgl")&&(l=f.getContext("webgl",o)||f.getContext("experimental-webgl",o),l))try{return new c.WebGLContext(l,1)}catch(t){u.Logger.warning("GlContextFactory",`failed to create WebGLContext using contextId 'webgl' or 'experimental-webgl'. 
Error: ${t}`)}throw new Error("WebGL is not supported")}n.createWebGLContext=function h(f){let l;f&&f!=="webgl2"||!("webgl2"in p)?f&&f!=="webgl"||!("webgl"in p)||(l=p.webgl):l=p.webgl2,l=l||s(f),f=f||l.version===1?"webgl":"webgl2";const o=l.gl;return p[f]=l,o.isContextLost()?(delete p[f],h(f)):(o.disable(o.DEPTH_TEST),o.disable(o.STENCIL_TEST),o.disable(o.BLEND),o.disable(o.DITHER),o.disable(o.POLYGON_OFFSET_FILL),o.disable(o.SAMPLE_COVERAGE),o.enable(o.SCISSOR_TEST),o.enable(o.CULL_FACE),o.cullFace(o.BACK),l)},n.createNewWebGLContext=s},1713:function(y,n,a){var u=this&&this.__createBinding||(Object.create?function(o,t,e,r){r===void 0&&(r=e);var i=Object.getOwnPropertyDescriptor(t,e);i&&!("get"in i?!t.__esModule:i.writable||i.configurable)||(i={enumerable:!0,get:function(){return t[e]}}),Object.defineProperty(o,r,i)}:function(o,t,e,r){r===void 0&&(r=e),o[r]=t[e]}),c=this&&this.__setModuleDefault||(Object.create?function(o,t){Object.defineProperty(o,"default",{enumerable:!0,value:t})}:function(o,t){o.default=t}),p=this&&this.__importStar||function(o){if(o&&o.__esModule)return o;var t={};if(o!=null)for(var e in o)e!=="default"&&Object.prototype.hasOwnProperty.call(o,e)&&u(t,o,e);return c(t,o),t};Object.defineProperty(n,"__esModule",{value:!0}),n.WebGLContext=n.linearSearchLastTrue=void 0;const s=a(1670),h=p(a(7769)),f=a(9390);function l(o){let t=0;for(;tthis.isTimerResultAvailable(o)),this.getTimerResult(o)}async createAndWaitForFence(){const o=this.createFence(this.gl);return this.pollFence(o)}createFence(o){let t;const e=o,r=e.fenceSync(e.SYNC_GPU_COMMANDS_COMPLETE,0);return o.flush(),t=r===null?()=>!0:()=>{const i=e.clientWaitSync(r,0,0);return i===e.ALREADY_SIGNALED||i===e.CONDITION_SATISFIED},{query:r,isFencePassed:t}}async pollFence(o){return new Promise(t=>{this.addItemToPoll(()=>o.isFencePassed(),()=>t())})}pollItems(){const o=l(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=o;++t){const{resolveFn:e}=this.itemsToPoll[t];e()}this.itemsToPoll=this.itemsToPoll.slice(o+1)}async addItemToPoll(o,t){this.itemsToPoll.push({isDoneFn:o,resolveFn:t}),this.itemsToPoll.length>1||await(0,f.repeatedTry)(()=>(this.pollItems(),this.itemsToPoll.length===0))}}},1036:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.ExecutionPlan=void 0;const u=a(6231);class c{constructor(s,h){this.op=s,this.node=h}}n.ExecutionPlan=class{constructor(p,s,h){this.graph=p,this.profiler=h,this.initialize(s)}initialize(p){this.profiler.event("session","ExecutionPlan.initialize",()=>{const s=this.graph.getNodes();if(s.length!==p.length)throw new Error("The size of nodes and OPs do not match.");this._ops=p.map((h,f)=>new c(h,s[f])),this.reset(),this._starter=[],this._ops.forEach((h,f)=>{let l=!0;for(const o of h.node.inputs)if(!this._values[o]&&this.graph.getInputIndices().indexOf(o)===-1){l=!1;break}l&&this._starter.push(f)})})}reset(){this._values=this.graph.getValues().map(p=>p.tensor)}async execute(p,s){return this.profiler.event("session","ExecutionPlan.execute",async()=>{this.reset();const h=p.createInferenceHandler(),f=this.graph.getInputIndices();if(s.length!==f.length)throw new Error(`number of input tensors don't match the number of inputs to the model: actual: ${s.length} expected: ${f.length}`);s.forEach((i,d)=>{const g=f[d];this._values[g]=i});const l=this._starter.slice(0),o=this.graph.getValues(),t=this.graph.getNodes();let e=0;for(;ethis._values[v]);if(g.indexOf(void 0)!==-1)throw new Error(`unresolved input detected: op: ${d.node}`);const m=g;u.Logger.verbose("ExecPlan",`Runing 
op:${d.node.name} (${m.map((v,w)=>`'${d.node.inputs[w]}': ${v.type}[${v.dims.join(",")}]`).join(", ")})`);const b=await this.profiler.event("node",d.node.name,async()=>d.op.impl(h,m,d.op.context));if(b.length!==d.node.outputs.length)throw new Error("the size of output does not match model definition.");b.forEach((v,w)=>{const S=d.node.outputs[w];if(this._values[S])throw new Error(`output [${S}] already has value: op:${d.node.name}`);this._values[S]=v});const _=new Set;b.forEach((v,w)=>{const S=d.node.outputs[w];for(const A of o[S].to){const O=t[A];let x=!0;for(const I of O.inputs)if(!this._values[I]){x=!1;break}x&&_.add(A)}}),l.push(..._)}const r=[];for(let i=0;i{Object.defineProperty(n,"__esModule",{value:!0}),n.Graph=void 0;const u=a(1446),c=a(7778),p=a(9395),s=a(9162),h=a(2517);var f=p.onnxruntime.experimental.fbs;n.Graph={from:(e,r)=>new t(e,r)};class l{constructor(r){this._from=void 0,this._to=[],this.tensor=void 0,this.type=void 0,r&&(this.type=h.ProtoUtil.tensorValueTypeFromProto(r.type.tensorType))}get from(){return this._from}get to(){return this._to}}class o{constructor(r,i){r instanceof u.onnx.NodeProto?(this.name=r.name,this.opType=r.opType,this.attributes=new c.Attribute(r.attribute)):r instanceof f.Node&&(this.name=i??r.name(),this.opType=r.opType(),this.attributes=new c.Attribute(h.ProtoUtil.tensorAttributesFromORTFormat(r))),this.inputs=[],this.outputs=[],this.executeNode=!0}}class t{constructor(r,i){if(!r)throw new TypeError("graph is empty");this.buildGraph(r),this.transformGraph(i),this.checkIsAcyclic()}getInputIndices(){return this._allInputIndices}getInputNames(){return this._allInputNames}getOutputIndices(){return this._allOutputIndices}getOutputNames(){return this._allOutputNames}getValues(){return this._allData}getNodes(){return this._nodes}buildGraph(r){if(r instanceof u.onnx.GraphProto)this.buildGraphFromOnnxFormat(r);else{if(!(r instanceof f.Graph))throw new TypeError("Graph type is not supported.");this.buildGraphFromOrtFormat(r)}}buildGraphFromOnnxFormat(r){const i=new Map;this._allData=[],this._allInputIndices=[],this._allInputNames=[],this._allOutputIndices=[],this._allOutputNames=[],this._nodes=[];const d=new Map;if(!r.input)throw new Error("missing information in graph: input");const g=[];for(const m of r.input){if(i.has(m.name))throw new Error(`duplicated input name: ${m.name}`);const b=this._allData.push(new l(m))-1;i.set(m.name,b),g.push(m.name)}if(!r.initializer)throw new Error("missing information in graph: initializer");for(const m of r.initializer){let b=i.get(m.name);if(b===void 0){const _=new l;_.type={shape:{dims:h.ProtoUtil.tensorDimsFromProto(m.dims)},tensorType:h.ProtoUtil.tensorDataTypeFromProto(m.dataType)},b=this._allData.push(_)-1,i.set(m.name,b)}this._allData[b]._from=-1,this._allData[b].tensor=s.Tensor.fromProto(m)}for(let m=0;m{this._allData[g]._to.forEach(m=>{r.add(m)})});const i=Array.from(r),d=new Array(this._nodes.length).fill("white");for(;i.length>0;){const g=i.pop();d[g]==="gray"?d[g]="black":(i.push(g),d[g]="gray",this._nodes[g].outputs.forEach(m=>{const b=this._allData[m];if(b.tensor!==void 0)throw new Error("node outputs should not be initialized");if(b._from!==g)throw new Error("from property of the Value object doesn't match index of Node being processed");b._to.forEach(_=>{if(d[_]==="gray")throw new Error("model graph is 
cyclic");d[_]==="white"&&i.push(_)})}))}}transformGraph(r){this.removeAllIdentityNodes(),this.removeAllDropoutNodes(),this.fuseConvActivationNodes(),r&&r.transformGraph(this),this.finalizeGraph()}finalizeGraph(){let r=0;for(let i=0;i0&&(this._nodes[i].inputs.forEach(d=>{const g=this._allData[d]._to.indexOf(i+r);g!==-1&&(this._allData[d]._to[g]=i)}),this._nodes[i].outputs.forEach(d=>{this._allData[d]._from&&this._allData[d]._from===i+r&&(this._allData[d]._from=i)})):(r++,this._nodes[i].outputs.forEach(d=>{this._allData[d]._from=-2}),this._nodes.splice(i,1),i--);r=0;for(let i=0;i0){let d=-1;this._allData[i].from!==void 0&&this._allData[i].from!==-1?(d=this._nodes[this._allData[i].from].outputs.indexOf(i+r),d!==-1&&(this._nodes[this._allData[i].from].outputs[d]=i)):(d=this._allInputIndices.indexOf(i+r),d!==-1&&(this._allInputIndices[d]=i)),this._allData[i].to.forEach(g=>{d=this._nodes[g].inputs.indexOf(i+r),d!==-1&&(this._nodes[g].inputs[d]=i)}),this._allData[i].to.length===0&&(d=this._allOutputIndices.indexOf(i+r),d!==-1&&(this._allOutputIndices[d]=i))}}else r++,this._allData.splice(i,1),i--}deleteNode(r){const i=this._nodes[r];if(i.outputs.length>1){for(let v=1;v0)throw new Error("Node deletion with more than one output connected to other nodes is not supported. ")}i.executeNode=!1;const d=i.inputs[0],g=i.outputs[0],m=this._allData[g].to,b=this._allData[d].to.indexOf(r);if(b===-1)throw new Error("The Value object doesn't have the current Node in it's 'to' property ");this._allData[d].to.splice(b,1),this._allData[g]._to=[];const _=this._allOutputIndices.indexOf(g);if(_!==-1&&(this._allOutputIndices[_]=d),m&&m.length>0)for(const v of m){const w=this._nodes[v].inputs.indexOf(g);if(w===-1)throw new Error("The Node object doesn't have the output Value in it's 'inputs' property ");this._nodes[v].inputs[w]=d,this._allData[d].to.push(v)}}removeAllDropoutNodes(){let r=0;for(const i of this._nodes){if(i.opType==="Dropout"){if(i.inputs.length!==1)throw new Error("Dropout nodes should only contain one input. 
");if(i.outputs.length!==1&&i.outputs.length!==2)throw new Error("Dropout nodes should contain either 1 or 2 output(s)");if(i.outputs.length===2&&this._allData[i.outputs[1]]._to.length!==0)throw new Error("Dropout nodes's second output should not be referenced by other nodes");this.deleteNode(r)}r++}}removeAllIdentityNodes(){let r=0;for(const i of this._nodes)i.opType==="Identity"&&this.deleteNode(r),r++}isActivation(r){switch(r.opType){case"Relu":case"Sigmoid":case"Clip":return!0;default:return!1}}fuseConvActivationNodes(){for(const r of this._nodes)if(r.opType==="Conv"){const i=this._allData[r.outputs[0]]._to;if(i.length===1&&this.isActivation(this._nodes[i[0]])){const d=this._nodes[i[0]];if(d.opType==="Clip")if(d.inputs.length===1)try{r.attributes.set("activation_params","floats",[d.attributes.getFloat("min"),d.attributes.getFloat("max")])}catch{r.attributes.set("activation_params","floats",[h.MIN_CLIP,h.MAX_CLIP])}else{if(!(d.inputs.length>=3&&this._allData[d.inputs[1]].tensor!==void 0&&this._allData[d.inputs[2]].tensor!==void 0))continue;r.attributes.set("activation_params","floats",[this._allData[d.inputs[1]].tensor.floatData[0],this._allData[d.inputs[2]].tensor.floatData[0]])}r.attributes.set("activation","string",d.opType),this.deleteNode(i[0])}}}}},6231:(y,n)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.now=n.Profiler=n.Logger=void 0;const a={verbose:1e3,info:2e3,warning:4e3,error:5e3,fatal:6e3},u={none:new class{log(o,t,e){}},console:new class{log(o,t,e){console.log(`${this.color(o)} ${e?"\x1B[35m"+e+"\x1B[0m ":""}${t}`)}color(o){switch(o){case"verbose":return"\x1B[34;40mv\x1B[0m";case"info":return"\x1B[32mi\x1B[0m";case"warning":return"\x1B[30;43mw\x1B[0m";case"error":return"\x1B[31;40me\x1B[0m";case"fatal":return"\x1B[101mf\x1B[0m";default:throw new Error(`unsupported severity: ${o}`)}}}},c={provider:"console",minimalSeverity:"warning",logDateTime:!0,logSourceLocation:!1};let p={"":c};function s(o,t,e,r){if(t===void 0)return i=o,{verbose:s.verbose.bind(null,i),info:s.info.bind(null,i),warning:s.warning.bind(null,i),error:s.error.bind(null,i),fatal:s.fatal.bind(null,i)};if(e===void 0)h(o,t);else if(typeof e=="number"&&r===void 0)h(o,t);else if(typeof e=="string"&&r===void 0)h(o,e,0,t);else{if(typeof e!="string"||typeof r!="number")throw new TypeError("input is valid");h(o,e,0,t)}var i}function h(o,t,e,r){const i=p[r||""]||p[""];a[o]{g.then(async _=>{i&&await i.end(),m(_)},async _=>{i&&await i.end(),b(_)})});if(!d&&i){const m=i.end();if(m&&typeof m.then=="function")return new Promise((b,_)=>{m.then(()=>{b(g)},v=>{_(v)})})}return g}begin(o,t,e){if(!this._started)throw new Error("profiler is not started yet");if(e===void 0){const r=(0,n.now)();return this.flush(r),new f(o,t,r,i=>this.endSync(i))}{const r=e.beginTimer();return new f(o,t,0,async i=>this.end(i),r,e)}}async end(o){const t=await o.checkTimer();this._timingEvents.length=this._flushBatchSize||o-this._flushTime>=this._flushIntervalInMilliseconds){for(const t=this._flushPointer;this._flushPointerperformance.now():Date.now},2644:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Model=void 0;const u=a(5686),c=a(1446),p=a(7070),s=a(9395),h=a(2517);var f=s.onnxruntime.experimental.fbs;n.Model=class{constructor(){}load(l,o,t){if(!t)try{return void this.loadFromOnnxFormat(l,o)}catch(e){if(t!==void 0)throw e}this.loadFromOrtFormat(l,o)}loadFromOnnxFormat(l,o){const t=c.onnx.ModelProto.decode(l);if(h.LongUtil.longToNumber(t.irVersion)<3)throw new Error("only support ONNX model with 
IR_VERSION>=3");this._opsets=t.opsetImport.map(e=>({domain:e.domain,version:h.LongUtil.longToNumber(e.version)})),this._graph=p.Graph.from(t.graph,o)}loadFromOrtFormat(l,o){const t=new u.flatbuffers.ByteBuffer(l),e=f.InferenceSession.getRootAsInferenceSession(t).model();if(h.LongUtil.longToNumber(e.irVersion())<3)throw new Error("only support ONNX model with IR_VERSION>=3");this._opsets=[];for(let r=0;r{Object.defineProperty(n,"__esModule",{value:!0}),n.FLOAT_TYPES=n.INT_TYPES=n.NUMBER_TYPES=void 0,n.NUMBER_TYPES=["float32","float64","int32","int16","int8","uint16","uint32","uint8"],n.INT_TYPES=["int32","int16","int8","uint16","uint32","uint8"],n.FLOAT_TYPES=["float32","float64"]},1047:(y,n)=>{function a(u,c){if(c.endsWith("+")){const p=Number.parseInt(c.substring(0,c.length-1),10);return!isNaN(p)&&p<=u}if(c.split("-").length===2){const p=c.split("-"),s=Number.parseInt(p[0],10),h=Number.parseInt(p[1],10);return!isNaN(s)&&!isNaN(h)&&s<=u&&u<=h}return Number.parseInt(c,10)===u}Object.defineProperty(n,"__esModule",{value:!0}),n.resolveOperator=void 0,n.resolveOperator=function(u,c,p){for(const s of p){const h=s[0],f=s[1],l=s[2],o=s[3],t=s[4];if(u.opType===h){for(const e of c)if((e.domain===f||e.domain==="ai.onnx"&&f==="")&&a(e.version,l))return{opImpl:o,opInit:t}}}throw new TypeError(`cannot resolve operator '${u.opType}' with opsets: ${c.map(s=>`${s.domain||"ai.onnx"} v${s.version}`).join(", ")}`)}},9395:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.onnxruntime=void 0;const u=a(5686);var c,p;c=n.onnxruntime||(n.onnxruntime={}),function(s){(function(h){h[h.UNDEFINED=0]="UNDEFINED",h[h.FLOAT=1]="FLOAT",h[h.INT=2]="INT",h[h.STRING=3]="STRING",h[h.TENSOR=4]="TENSOR",h[h.GRAPH=5]="GRAPH",h[h.FLOATS=6]="FLOATS",h[h.INTS=7]="INTS",h[h.STRINGS=8]="STRINGS",h[h.TENSORS=9]="TENSORS",h[h.GRAPHS=10]="GRAPHS",h[h.SPARSE_TENSOR=11]="SPARSE_TENSOR",h[h.SPARSE_TENSORS=12]="SPARSE_TENSORS"})(s.AttributeType||(s.AttributeType={}))}((p=c.experimental||(c.experimental={})).fbs||(p.fbs={})),function(s){(function(h){(function(f){(function(l){l[l.UNKNOWN=0]="UNKNOWN",l[l.VALUE=1]="VALUE",l[l.PARAM=2]="PARAM"})(f.DimensionValueType||(f.DimensionValueType={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){(function(l){l[l.UNDEFINED=0]="UNDEFINED",l[l.FLOAT=1]="FLOAT",l[l.UINT8=2]="UINT8",l[l.INT8=3]="INT8",l[l.UINT16=4]="UINT16",l[l.INT16=5]="INT16",l[l.INT32=6]="INT32",l[l.INT64=7]="INT64",l[l.STRING=8]="STRING",l[l.BOOL=9]="BOOL",l[l.FLOAT16=10]="FLOAT16",l[l.DOUBLE=11]="DOUBLE",l[l.UINT32=12]="UINT32",l[l.UINT64=13]="UINT64",l[l.COMPLEX64=14]="COMPLEX64",l[l.COMPLEX128=15]="COMPLEX128",l[l.BFLOAT16=16]="BFLOAT16"})(f.TensorDataType||(f.TensorDataType={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){(function(l){l[l.Primitive=0]="Primitive",l[l.Fused=1]="Fused"})(f.NodeType||(f.NodeType={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){(function(l){l[l.NONE=0]="NONE",l[l.tensor_type=1]="tensor_type",l[l.sequence_type=2]="sequence_type",l[l.map_type=3]="map_type"})(f.TypeInfoValue||(f.TypeInfoValue={}))})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static 
getRootAsShape(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsShape(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}dim(t,e){let r=this.bb.__offset(this.bb_pos,4);return r?(e||new s.experimental.fbs.Dimension).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}dimLength(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.__vector_len(this.bb_pos+t):0}static startShape(t){t.startObject(1)}static addDim(t,e){t.addFieldOffset(0,e,0)}static createDimVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startDimVector(t,e){t.startVector(4,e,4)}static endShape(t){return t.endObject()}static createShape(t,e){return l.startShape(t),l.addDim(t,e),l.endShape(t)}}f.Shape=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsDimension(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsDimension(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}value(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.DimensionValue).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}denotation(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}static startDimension(t){t.startObject(2)}static addValue(t,e){t.addFieldOffset(0,e,0)}static addDenotation(t,e){t.addFieldOffset(1,e,0)}static endDimension(t){return t.endObject()}static createDimension(t,e,r){return l.startDimension(t),l.addValue(t,e),l.addDenotation(t,r),l.endDimension(t)}}f.Dimension=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsDimensionValue(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsDimensionValue(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}dimType(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt8(this.bb_pos+t):s.experimental.fbs.DimensionValueType.UNKNOWN}dimValue(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}dimParam(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__string(this.bb_pos+e,t):null}static startDimensionValue(t){t.startObject(3)}static addDimType(t,e){t.addFieldInt8(0,e,s.experimental.fbs.DimensionValueType.UNKNOWN)}static addDimValue(t,e){t.addFieldInt64(1,e,t.createLong(0,0))}static addDimParam(t,e){t.addFieldOffset(2,e,0)}static endDimensionValue(t){return t.endObject()}static createDimensionValue(t,e,r,i){return l.startDimensionValue(t),l.addDimType(t,e),l.addDimValue(t,r),l.addDimParam(t,i),l.endDimensionValue(t)}}f.DimensionValue=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static 
getRootAsTensorTypeAndShape(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsTensorTypeAndShape(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}elemType(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.TensorDataType.UNDEFINED}shape(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.Shape).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startTensorTypeAndShape(t){t.startObject(2)}static addElemType(t,e){t.addFieldInt32(0,e,s.experimental.fbs.TensorDataType.UNDEFINED)}static addShape(t,e){t.addFieldOffset(1,e,0)}static endTensorTypeAndShape(t){return t.endObject()}static createTensorTypeAndShape(t,e,r){return l.startTensorTypeAndShape(t),l.addElemType(t,e),l.addShape(t,r),l.endTensorTypeAndShape(t)}}f.TensorTypeAndShape=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsMapType(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsMapType(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}keyType(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.TensorDataType.UNDEFINED}valueType(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startMapType(t){t.startObject(2)}static addKeyType(t,e){t.addFieldInt32(0,e,s.experimental.fbs.TensorDataType.UNDEFINED)}static addValueType(t,e){t.addFieldOffset(1,e,0)}static endMapType(t){return t.endObject()}static createMapType(t,e,r){return l.startMapType(t),l.addKeyType(t,e),l.addValueType(t,r),l.endMapType(t)}}f.MapType=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSequenceType(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSequenceType(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}elemType(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startSequenceType(t){t.startObject(1)}static addElemType(t,e){t.addFieldOffset(0,e,0)}static endSequenceType(t){return t.endObject()}static createSequenceType(t,e){return l.startSequenceType(t),l.addElemType(t,e),l.endSequenceType(t)}}f.SequenceType=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(h.fbs||(h.fbs={})).EdgeEnd=class{constructor(){this.bb=null,this.bb_pos=0}__init(f,l){return this.bb_pos=f,this.bb=l,this}nodeIndex(){return this.bb.readUint32(this.bb_pos)}srcArgIndex(){return this.bb.readInt32(this.bb_pos+4)}dstArgIndex(){return this.bb.readInt32(this.bb_pos+8)}static createEdgeEnd(f,l,o,t){return 
f.prep(4,12),f.writeInt32(t),f.writeInt32(o),f.writeInt32(l),f.offset()}}})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsNodeEdge(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsNodeEdge(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}nodeIndex(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readUint32(this.bb_pos+t):0}inputEdges(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.EdgeEnd).__init(this.bb.__vector(this.bb_pos+r)+12*t,this.bb):null}inputEdgesLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}outputEdges(t,e){let r=this.bb.__offset(this.bb_pos,8);return r?(e||new s.experimental.fbs.EdgeEnd).__init(this.bb.__vector(this.bb_pos+r)+12*t,this.bb):null}outputEdgesLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}static startNodeEdge(t){t.startObject(3)}static addNodeIndex(t,e){t.addFieldInt32(0,e,0)}static addInputEdges(t,e){t.addFieldOffset(1,e,0)}static startInputEdgesVector(t,e){t.startVector(12,e,4)}static addOutputEdges(t,e){t.addFieldOffset(2,e,0)}static startOutputEdgesVector(t,e){t.startVector(12,e,4)}static endNodeEdge(t){return t.endObject()}static createNodeEdge(t,e,r,i){return l.startNodeEdge(t),l.addNodeIndex(t,e),l.addInputEdges(t,r),l.addOutputEdges(t,i),l.endNodeEdge(t)}}f.NodeEdge=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsNode(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsNode(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}domain(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__string(this.bb_pos+e,t):null}sinceVersion(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readInt32(this.bb_pos+t):0}index(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.readUint32(this.bb_pos+t):0}opType(t){let e=this.bb.__offset(this.bb_pos,14);return e?this.bb.__string(this.bb_pos+e,t):null}type(){let t=this.bb.__offset(this.bb_pos,16);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.NodeType.Primitive}executionProviderType(t){let e=this.bb.__offset(this.bb_pos,18);return e?this.bb.__string(this.bb_pos+e,t):null}inputs(t,e){let r=this.bb.__offset(this.bb_pos,20);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}inputsLength(){let t=this.bb.__offset(this.bb_pos,20);return t?this.bb.__vector_len(this.bb_pos+t):0}outputs(t,e){let r=this.bb.__offset(this.bb_pos,22);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}outputsLength(){let t=this.bb.__offset(this.bb_pos,22);return t?this.bb.__vector_len(this.bb_pos+t):0}attributes(t,e){let r=this.bb.__offset(this.bb_pos,24);return r?(e||new 
s.experimental.fbs.Attribute).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}attributesLength(){let t=this.bb.__offset(this.bb_pos,24);return t?this.bb.__vector_len(this.bb_pos+t):0}inputArgCounts(t){let e=this.bb.__offset(this.bb_pos,26);return e?this.bb.readInt32(this.bb.__vector(this.bb_pos+e)+4*t):0}inputArgCountsLength(){let t=this.bb.__offset(this.bb_pos,26);return t?this.bb.__vector_len(this.bb_pos+t):0}inputArgCountsArray(){let t=this.bb.__offset(this.bb_pos,26);return t?new Int32Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}implicitInputs(t,e){let r=this.bb.__offset(this.bb_pos,28);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}implicitInputsLength(){let t=this.bb.__offset(this.bb_pos,28);return t?this.bb.__vector_len(this.bb_pos+t):0}static startNode(t){t.startObject(13)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addDomain(t,e){t.addFieldOffset(2,e,0)}static addSinceVersion(t,e){t.addFieldInt32(3,e,0)}static addIndex(t,e){t.addFieldInt32(4,e,0)}static addOpType(t,e){t.addFieldOffset(5,e,0)}static addType(t,e){t.addFieldInt32(6,e,s.experimental.fbs.NodeType.Primitive)}static addExecutionProviderType(t,e){t.addFieldOffset(7,e,0)}static addInputs(t,e){t.addFieldOffset(8,e,0)}static createInputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startInputsVector(t,e){t.startVector(4,e,4)}static addOutputs(t,e){t.addFieldOffset(9,e,0)}static createOutputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startOutputsVector(t,e){t.startVector(4,e,4)}static addAttributes(t,e){t.addFieldOffset(10,e,0)}static createAttributesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startAttributesVector(t,e){t.startVector(4,e,4)}static addInputArgCounts(t,e){t.addFieldOffset(11,e,0)}static createInputArgCountsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addInt32(e[r]);return t.endVector()}static startInputArgCountsVector(t,e){t.startVector(4,e,4)}static addImplicitInputs(t,e){t.addFieldOffset(12,e,0)}static createImplicitInputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startImplicitInputsVector(t,e){t.startVector(4,e,4)}static endNode(t){return t.endObject()}static createNode(t,e,r,i,d,g,m,b,_,v,w,S,A,O){return l.startNode(t),l.addName(t,e),l.addDocString(t,r),l.addDomain(t,i),l.addSinceVersion(t,d),l.addIndex(t,g),l.addOpType(t,m),l.addType(t,b),l.addExecutionProviderType(t,_),l.addInputs(t,v),l.addOutputs(t,w),l.addAttributes(t,S),l.addInputArgCounts(t,A),l.addImplicitInputs(t,O),l.endNode(t)}}f.Node=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsValueInfo(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsValueInfo(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let 
e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}type(t){let e=this.bb.__offset(this.bb_pos,8);return e?(t||new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startValueInfo(t){t.startObject(3)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addType(t,e){t.addFieldOffset(2,e,0)}static endValueInfo(t){return t.endObject()}static createValueInfo(t,e,r,i){return l.startValueInfo(t),l.addName(t,e),l.addDocString(t,r),l.addType(t,i),l.endValueInfo(t)}}f.ValueInfo=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsTypeInfo(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsTypeInfo(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}denotation(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}valueType(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.readUint8(this.bb_pos+t):s.experimental.fbs.TypeInfoValue.NONE}value(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__union(t,this.bb_pos+e):null}static startTypeInfo(t){t.startObject(3)}static addDenotation(t,e){t.addFieldOffset(0,e,0)}static addValueType(t,e){t.addFieldInt8(1,e,s.experimental.fbs.TypeInfoValue.NONE)}static addValue(t,e){t.addFieldOffset(2,e,0)}static endTypeInfo(t){return t.endObject()}static createTypeInfo(t,e,r,i){return l.startTypeInfo(t),l.addDenotation(t,e),l.addValueType(t,r),l.addValue(t,i),l.endTypeInfo(t)}}f.TypeInfo=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsOperatorSetId(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsOperatorSetId(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}domain(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}version(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}static startOperatorSetId(t){t.startObject(2)}static addDomain(t,e){t.addFieldOffset(0,e,0)}static addVersion(t,e){t.addFieldInt64(1,e,t.createLong(0,0))}static endOperatorSetId(t){return t.endObject()}static createOperatorSetId(t,e,r){return l.startOperatorSetId(t),l.addDomain(t,e),l.addVersion(t,r),l.endOperatorSetId(t)}}f.OperatorSetId=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsTensor(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsTensor(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let e=this.bb.__offset(this.bb_pos,6);return 
e?this.bb.__string(this.bb_pos+e,t):null}dims(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.readInt64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}dimsLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}dataType(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.TensorDataType.UNDEFINED}rawData(t){let e=this.bb.__offset(this.bb_pos,12);return e?this.bb.readUint8(this.bb.__vector(this.bb_pos+e)+t):0}rawDataLength(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.__vector_len(this.bb_pos+t):0}rawDataArray(){let t=this.bb.__offset(this.bb_pos,12);return t?new Uint8Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}stringData(t,e){let r=this.bb.__offset(this.bb_pos,14);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}stringDataLength(){let t=this.bb.__offset(this.bb_pos,14);return t?this.bb.__vector_len(this.bb_pos+t):0}static startTensor(t){t.startObject(6)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addDims(t,e){t.addFieldOffset(2,e,0)}static createDimsVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static startDimsVector(t,e){t.startVector(8,e,8)}static addDataType(t,e){t.addFieldInt32(3,e,s.experimental.fbs.TensorDataType.UNDEFINED)}static addRawData(t,e){t.addFieldOffset(4,e,0)}static createRawDataVector(t,e){t.startVector(1,e.length,1);for(let r=e.length-1;r>=0;r--)t.addInt8(e[r]);return t.endVector()}static startRawDataVector(t,e){t.startVector(1,e,1)}static addStringData(t,e){t.addFieldOffset(5,e,0)}static createStringDataVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startStringDataVector(t,e){t.startVector(4,e,4)}static endTensor(t){return t.endObject()}static createTensor(t,e,r,i,d,g,m){return l.startTensor(t),l.addName(t,e),l.addDocString(t,r),l.addDims(t,i),l.addDataType(t,d),l.addRawData(t,g),l.addStringData(t,m),l.endTensor(t)}}f.Tensor=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSparseTensor(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSparseTensor(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}values(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}indices(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}dims(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.readInt64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}dimsLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}static startSparseTensor(t){t.startObject(3)}static addValues(t,e){t.addFieldOffset(0,e,0)}static addIndices(t,e){t.addFieldOffset(1,e,0)}static addDims(t,e){t.addFieldOffset(2,e,0)}static createDimsVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static 
startDimsVector(t,e){t.startVector(8,e,8)}static endSparseTensor(t){return t.endObject()}static createSparseTensor(t,e,r,i){return l.startSparseTensor(t),l.addValues(t,e),l.addIndices(t,r),l.addDims(t,i),l.endSparseTensor(t)}}f.SparseTensor=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsAttribute(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsAttribute(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}name(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}docString(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.__string(this.bb_pos+e,t):null}type(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.readInt32(this.bb_pos+t):s.experimental.fbs.AttributeType.UNDEFINED}f(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readFloat32(this.bb_pos+t):0}i(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}s(t){let e=this.bb.__offset(this.bb_pos,14);return e?this.bb.__string(this.bb_pos+e,t):null}t(t){let e=this.bb.__offset(this.bb_pos,16);return e?(t||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}g(t){let e=this.bb.__offset(this.bb_pos,18);return e?(t||new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}floats(t){let e=this.bb.__offset(this.bb_pos,20);return e?this.bb.readFloat32(this.bb.__vector(this.bb_pos+e)+4*t):0}floatsLength(){let t=this.bb.__offset(this.bb_pos,20);return t?this.bb.__vector_len(this.bb_pos+t):0}floatsArray(){let t=this.bb.__offset(this.bb_pos,20);return t?new Float32Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}ints(t){let e=this.bb.__offset(this.bb_pos,22);return e?this.bb.readInt64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}intsLength(){let t=this.bb.__offset(this.bb_pos,22);return t?this.bb.__vector_len(this.bb_pos+t):0}strings(t,e){let r=this.bb.__offset(this.bb_pos,24);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}stringsLength(){let t=this.bb.__offset(this.bb_pos,24);return t?this.bb.__vector_len(this.bb_pos+t):0}tensors(t,e){let r=this.bb.__offset(this.bb_pos,26);return r?(e||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}tensorsLength(){let t=this.bb.__offset(this.bb_pos,26);return t?this.bb.__vector_len(this.bb_pos+t):0}graphs(t,e){let r=this.bb.__offset(this.bb_pos,28);return r?(e||new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}graphsLength(){let t=this.bb.__offset(this.bb_pos,28);return t?this.bb.__vector_len(this.bb_pos+t):0}static startAttribute(t){t.startObject(13)}static addName(t,e){t.addFieldOffset(0,e,0)}static addDocString(t,e){t.addFieldOffset(1,e,0)}static addType(t,e){t.addFieldInt32(2,e,s.experimental.fbs.AttributeType.UNDEFINED)}static addF(t,e){t.addFieldFloat32(3,e,0)}static addI(t,e){t.addFieldInt64(4,e,t.createLong(0,0))}static addS(t,e){t.addFieldOffset(5,e,0)}static addT(t,e){t.addFieldOffset(6,e,0)}static addG(t,e){t.addFieldOffset(7,e,0)}static 
addFloats(t,e){t.addFieldOffset(8,e,0)}static createFloatsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addFloat32(e[r]);return t.endVector()}static startFloatsVector(t,e){t.startVector(4,e,4)}static addInts(t,e){t.addFieldOffset(9,e,0)}static createIntsVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static startIntsVector(t,e){t.startVector(8,e,8)}static addStrings(t,e){t.addFieldOffset(10,e,0)}static createStringsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startStringsVector(t,e){t.startVector(4,e,4)}static addTensors(t,e){t.addFieldOffset(11,e,0)}static createTensorsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startTensorsVector(t,e){t.startVector(4,e,4)}static addGraphs(t,e){t.addFieldOffset(12,e,0)}static createGraphsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startGraphsVector(t,e){t.startVector(4,e,4)}static endAttribute(t){return t.endObject()}static createAttribute(t,e,r,i,d,g,m,b,_,v,w,S,A,O){return l.startAttribute(t),l.addName(t,e),l.addDocString(t,r),l.addType(t,i),l.addF(t,d),l.addI(t,g),l.addS(t,m),l.addT(t,b),l.addG(t,_),l.addFloats(t,v),l.addInts(t,w),l.addStrings(t,S),l.addTensors(t,A),l.addGraphs(t,O),l.endAttribute(t)}}f.Attribute=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsGraph(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsGraph(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}initializers(t,e){let r=this.bb.__offset(this.bb_pos,4);return r?(e||new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}initializersLength(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.__vector_len(this.bb_pos+t):0}nodeArgs(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.ValueInfo).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}nodeArgsLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}nodes(t,e){let r=this.bb.__offset(this.bb_pos,8);return r?(e||new s.experimental.fbs.Node).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}nodesLength(){let t=this.bb.__offset(this.bb_pos,8);return t?this.bb.__vector_len(this.bb_pos+t):0}maxNodeIndex(){let t=this.bb.__offset(this.bb_pos,10);return t?this.bb.readUint32(this.bb_pos+t):0}nodeEdges(t,e){let r=this.bb.__offset(this.bb_pos,12);return r?(e||new s.experimental.fbs.NodeEdge).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}nodeEdgesLength(){let t=this.bb.__offset(this.bb_pos,12);return t?this.bb.__vector_len(this.bb_pos+t):0}inputs(t,e){let r=this.bb.__offset(this.bb_pos,14);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}inputsLength(){let t=this.bb.__offset(this.bb_pos,14);return t?this.bb.__vector_len(this.bb_pos+t):0}outputs(t,e){let r=this.bb.__offset(this.bb_pos,16);return r?this.bb.__string(this.bb.__vector(this.bb_pos+r)+4*t,e):null}outputsLength(){let 
t=this.bb.__offset(this.bb_pos,16);return t?this.bb.__vector_len(this.bb_pos+t):0}sparseInitializers(t,e){let r=this.bb.__offset(this.bb_pos,18);return r?(e||new s.experimental.fbs.SparseTensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}sparseInitializersLength(){let t=this.bb.__offset(this.bb_pos,18);return t?this.bb.__vector_len(this.bb_pos+t):0}static startGraph(t){t.startObject(8)}static addInitializers(t,e){t.addFieldOffset(0,e,0)}static createInitializersVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startInitializersVector(t,e){t.startVector(4,e,4)}static addNodeArgs(t,e){t.addFieldOffset(1,e,0)}static createNodeArgsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startNodeArgsVector(t,e){t.startVector(4,e,4)}static addNodes(t,e){t.addFieldOffset(2,e,0)}static createNodesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startNodesVector(t,e){t.startVector(4,e,4)}static addMaxNodeIndex(t,e){t.addFieldInt32(3,e,0)}static addNodeEdges(t,e){t.addFieldOffset(4,e,0)}static createNodeEdgesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startNodeEdgesVector(t,e){t.startVector(4,e,4)}static addInputs(t,e){t.addFieldOffset(5,e,0)}static createInputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startInputsVector(t,e){t.startVector(4,e,4)}static addOutputs(t,e){t.addFieldOffset(6,e,0)}static createOutputsVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startOutputsVector(t,e){t.startVector(4,e,4)}static addSparseInitializers(t,e){t.addFieldOffset(7,e,0)}static createSparseInitializersVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startSparseInitializersVector(t,e){t.startVector(4,e,4)}static endGraph(t){return t.endObject()}static createGraph(t,e,r,i,d,g,m,b,_){return l.startGraph(t),l.addInitializers(t,e),l.addNodeArgs(t,r),l.addNodes(t,i),l.addMaxNodeIndex(t,d),l.addNodeEdges(t,g),l.addInputs(t,m),l.addOutputs(t,b),l.addSparseInitializers(t,_),l.endGraph(t)}}f.Graph=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsModel(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsModel(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}irVersion(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}opsetImport(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.OperatorSetId).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}opsetImportLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}producerName(t){let e=this.bb.__offset(this.bb_pos,8);return e?this.bb.__string(this.bb_pos+e,t):null}producerVersion(t){let e=this.bb.__offset(this.bb_pos,10);return e?this.bb.__string(this.bb_pos+e,t):null}domain(t){let 
e=this.bb.__offset(this.bb_pos,12);return e?this.bb.__string(this.bb_pos+e,t):null}modelVersion(){let t=this.bb.__offset(this.bb_pos,14);return t?this.bb.readInt64(this.bb_pos+t):this.bb.createLong(0,0)}docString(t){let e=this.bb.__offset(this.bb_pos,16);return e?this.bb.__string(this.bb_pos+e,t):null}graph(t){let e=this.bb.__offset(this.bb_pos,18);return e?(t||new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}graphDocString(t){let e=this.bb.__offset(this.bb_pos,20);return e?this.bb.__string(this.bb_pos+e,t):null}static startModel(t){t.startObject(9)}static addIrVersion(t,e){t.addFieldInt64(0,e,t.createLong(0,0))}static addOpsetImport(t,e){t.addFieldOffset(1,e,0)}static createOpsetImportVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startOpsetImportVector(t,e){t.startVector(4,e,4)}static addProducerName(t,e){t.addFieldOffset(2,e,0)}static addProducerVersion(t,e){t.addFieldOffset(3,e,0)}static addDomain(t,e){t.addFieldOffset(4,e,0)}static addModelVersion(t,e){t.addFieldInt64(5,e,t.createLong(0,0))}static addDocString(t,e){t.addFieldOffset(6,e,0)}static addGraph(t,e){t.addFieldOffset(7,e,0)}static addGraphDocString(t,e){t.addFieldOffset(8,e,0)}static endModel(t){return t.endObject()}static createModel(t,e,r,i,d,g,m,b,_,v){return l.startModel(t),l.addIrVersion(t,e),l.addOpsetImport(t,r),l.addProducerName(t,i),l.addProducerVersion(t,d),l.addDomain(t,g),l.addModelVersion(t,m),l.addDocString(t,b),l.addGraph(t,_),l.addGraphDocString(t,v),l.endModel(t)}}f.Model=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsKernelCreateInfos(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsKernelCreateInfos(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}nodeIndices(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.readUint32(this.bb.__vector(this.bb_pos+e)+4*t):0}nodeIndicesLength(){let t=this.bb.__offset(this.bb_pos,4);return t?this.bb.__vector_len(this.bb_pos+t):0}nodeIndicesArray(){let t=this.bb.__offset(this.bb_pos,4);return t?new Uint32Array(this.bb.bytes().buffer,this.bb.bytes().byteOffset+this.bb.__vector(this.bb_pos+t),this.bb.__vector_len(this.bb_pos+t)):null}kernelDefHashes(t){let e=this.bb.__offset(this.bb_pos,6);return e?this.bb.readUint64(this.bb.__vector(this.bb_pos+e)+8*t):this.bb.createLong(0,0)}kernelDefHashesLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}static startKernelCreateInfos(t){t.startObject(2)}static addNodeIndices(t,e){t.addFieldOffset(0,e,0)}static createNodeIndicesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addInt32(e[r]);return t.endVector()}static startNodeIndicesVector(t,e){t.startVector(4,e,4)}static addKernelDefHashes(t,e){t.addFieldOffset(1,e,0)}static createKernelDefHashesVector(t,e){t.startVector(8,e.length,8);for(let r=e.length-1;r>=0;r--)t.addInt64(e[r]);return t.endVector()}static startKernelDefHashesVector(t,e){t.startVector(8,e,8)}static endKernelCreateInfos(t){return t.endObject()}static createKernelCreateInfos(t,e,r){return 
l.startKernelCreateInfos(t),l.addNodeIndices(t,e),l.addKernelDefHashes(t,r),l.endKernelCreateInfos(t)}}f.KernelCreateInfos=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSubGraphSessionState(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSubGraphSessionState(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}graphId(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}sessionState(t){let e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.SessionState).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startSubGraphSessionState(t){t.startObject(2)}static addGraphId(t,e){t.addFieldOffset(0,e,0)}static addSessionState(t,e){t.addFieldOffset(1,e,0)}static endSubGraphSessionState(t){let e=t.endObject();return t.requiredField(e,4),e}static createSubGraphSessionState(t,e,r){return l.startSubGraphSessionState(t),l.addGraphId(t,e),l.addSessionState(t,r),l.endSubGraphSessionState(t)}}f.SubGraphSessionState=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsSessionState(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsSessionState(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}kernels(t){let e=this.bb.__offset(this.bb_pos,4);return e?(t||new s.experimental.fbs.KernelCreateInfos).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}subGraphSessionStates(t,e){let r=this.bb.__offset(this.bb_pos,6);return r?(e||new s.experimental.fbs.SubGraphSessionState).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos+r)+4*t),this.bb):null}subGraphSessionStatesLength(){let t=this.bb.__offset(this.bb_pos,6);return t?this.bb.__vector_len(this.bb_pos+t):0}static startSessionState(t){t.startObject(2)}static addKernels(t,e){t.addFieldOffset(0,e,0)}static addSubGraphSessionStates(t,e){t.addFieldOffset(1,e,0)}static createSubGraphSessionStatesVector(t,e){t.startVector(4,e.length,4);for(let r=e.length-1;r>=0;r--)t.addOffset(e[r]);return t.endVector()}static startSubGraphSessionStatesVector(t,e){t.startVector(4,e,4)}static endSessionState(t){return t.endObject()}static createSessionState(t,e,r){return l.startSessionState(t),l.addKernels(t,e),l.addSubGraphSessionStates(t,r),l.endSessionState(t)}}f.SessionState=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={})),function(s){(function(h){(function(f){class l{constructor(){this.bb=null,this.bb_pos=0}__init(t,e){return this.bb_pos=t,this.bb=e,this}static getRootAsInferenceSession(t,e){return(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static getSizePrefixedRootAsInferenceSession(t,e){return t.setPosition(t.position()+u.flatbuffers.SIZE_PREFIX_LENGTH),(e||new l).__init(t.readInt32(t.position())+t.position(),t)}static bufferHasIdentifier(t){return t.__has_identifier("ORTM")}ortVersion(t){let e=this.bb.__offset(this.bb_pos,4);return e?this.bb.__string(this.bb_pos+e,t):null}model(t){let 
e=this.bb.__offset(this.bb_pos,6);return e?(t||new s.experimental.fbs.Model).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}sessionState(t){let e=this.bb.__offset(this.bb_pos,8);return e?(t||new s.experimental.fbs.SessionState).__init(this.bb.__indirect(this.bb_pos+e),this.bb):null}static startInferenceSession(t){t.startObject(3)}static addOrtVersion(t,e){t.addFieldOffset(0,e,0)}static addModel(t,e){t.addFieldOffset(1,e,0)}static addSessionState(t,e){t.addFieldOffset(2,e,0)}static endInferenceSession(t){return t.endObject()}static finishInferenceSessionBuffer(t,e){t.finish(e,"ORTM")}static finishSizePrefixedInferenceSessionBuffer(t,e){t.finish(e,"ORTM",!0)}static createInferenceSession(t,e,r,i){return l.startInferenceSession(t),l.addOrtVersion(t,e),l.addModel(t,r),l.addSessionState(t,i),l.endInferenceSession(t)}}f.InferenceSession=l})(h.fbs||(h.fbs={}))})(s.experimental||(s.experimental={}))}(n.onnxruntime||(n.onnxruntime={}))},7448:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.OnnxjsSessionHandler=void 0;const u=a(1670),c=a(9162);n.OnnxjsSessionHandler=class{constructor(p){this.session=p,this.inputNames=this.session.inputNames,this.outputNames=this.session.outputNames}async dispose(){}async run(p,s,h){const f=new Map;for(const t in p)if(Object.hasOwnProperty.call(p,t)){const e=p[t];f.set(t,new c.Tensor(e.dims,e.type,void 0,void 0,e.data))}const l=await this.session.run(f),o={};return l.forEach((t,e)=>{o[e]=new u.Tensor(t.type,t.data,t.dims)}),o}startProfiling(){this.session.startProfiling()}endProfiling(){this.session.endProfiling()}}},6919:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.Session=void 0;const u=a(7067),c=a(1296),p=a(7091),s=a(1036),h=a(6231),f=a(2644);n.Session=class{constructor(l={}){this._initialized=!1,this.backendHint=l.backendHint,this.profiler=h.Profiler.create(l.profiler),this.context={profiler:this.profiler,graphInputTypes:[],graphInputDims:[]}}get inputNames(){return this._model.graph.getInputNames()}get outputNames(){return this._model.graph.getOutputNames()}startProfiling(){this.profiler.start()}endProfiling(){this.profiler.stop()}async loadModel(l,o,t){await this.profiler.event("session","Session.loadModel",async()=>{const e=await(0,p.resolveBackend)(this.backendHint);if(this.sessionHandler=e.createSessionHandler(this.context),this._model=new f.Model,typeof l=="string"){const r=l.endsWith(".ort");if(typeof fetch>"u"){const i=await(0,c.promisify)(u.readFile)(l);this.initialize(i,r)}else{const i=await fetch(l),d=await i.arrayBuffer();this.initialize(new Uint8Array(d),r)}}else if(ArrayBuffer.isView(l))this.initialize(l);else{const r=new Uint8Array(l,o||0,t||l.byteLength);this.initialize(r)}})}initialize(l,o){if(this._initialized)throw new Error("already initialized");this.profiler.event("session","Session.initialize",()=>{const t=this.sessionHandler.transformGraph?this.sessionHandler:void 0;this._model.load(l,t,o),this.sessionHandler.onGraphInitialized&&this.sessionHandler.onGraphInitialized(this._model.graph),this.initializeOps(this._model.graph),this._executionPlan=new s.ExecutionPlan(this._model.graph,this._ops,this.profiler)}),this._initialized=!0}async run(l){if(!this._initialized)throw new Error("session not initialized yet");return this.profiler.event("session","Session.run",async()=>{const o=this.normalizeAndValidateInputs(l),t=await this._executionPlan.execute(this.sessionHandler,o);return this.createOutput(t)})}normalizeAndValidateInputs(l){const 
o=this._model.graph.getInputNames();if(Array.isArray(l)){if(l.length!==o.length)throw new Error(`incorrect input array length: expected ${o.length} but got ${l.length}`)}else{if(l.size!==o.length)throw new Error(`incorrect input map size: expected ${o.length} but got ${l.size}`);const t=new Array(l.size);let e=0;for(let r=0;rtypeof O=="string")))throw new TypeError("cache should be a string array");A&&(this.cache=new Array(S))}else{if(v!==void 0){const O=e(m);if(!(v instanceof O))throw new TypeError(`cache should be type ${O.name}`)}if(A){const O=new ArrayBuffer(S*function(x){switch(x){case"bool":case"int8":case"uint8":return 1;case"int16":case"uint16":return 2;case"int32":case"uint32":case"float32":return 4;case"float64":return 8;default:throw new Error(`cannot calculate sizeof() on type ${x}`)}}(m));this.cache=function(x,I){return new(e(I))(x)}(O,m)}}}static fromProto(g){if(!g)throw new Error("cannot construct Value from an empty tensor");const m=f.ProtoUtil.tensorDataTypeFromProto(g.dataType),b=f.ProtoUtil.tensorDimsFromProto(g.dims),_=new o(b,m);if(m==="string")g.stringData.forEach((v,w)=>{_.data[w]=(0,f.decodeUtf8String)(v)});else if(g.rawData&&typeof g.rawData.byteLength=="number"&&g.rawData.byteLength>0){const v=_.data,w=new DataView(g.rawData.buffer,g.rawData.byteOffset,g.rawData.byteLength),S=t(g.dataType),A=g.rawData.byteLength/S;if(g.rawData.byteLength%S!=0)throw new Error("invalid buffer length");if(v.length!==A)throw new Error("buffer length mismatch");for(let O=0;O0){const v=_.data,w=new DataView(g.rawDataArray().buffer,g.rawDataArray().byteOffset,g.rawDataLength()),S=t(g.dataType()),A=g.rawDataLength()/S;if(g.rawDataLength()%S!=0)throw new Error("invalid buffer length");if(v.length!==A)throw new Error("buffer length mismatch");for(let O=0;O1&&I>1)return;A[S-O]=Math.max(x,I)}return A}static index(m,b){const _=new Array(b.length);return l.fillIndex(m,b,_),_}static fillIndex(m,b,_){const v=m.length-b.length;for(let w=0;w=0;Z--)x[Z]=B%S[Z],B=Math.floor(B/S[Z]);H||(l.fillIndex(x,m.dims,I),L=m.get(I)),D||(l.fillIndex(x,b.dims,N),F=b.get(N)),O.set(x,_(L,F))}}return O}}static isValidBroadcast(m,b){const _=m.length,v=b.length;if(_>v)return!1;for(let w=1;w<=_;w++)if(m[_-w]!==1&&m[_-w]!==b[v-w])return!1;return!0}static getBroadcastDims(m,b){const _=m.length,v=[];for(let w=0;w<_;w++){const S=_-1-w,A=m[S]||1;(b[b.length-1-w]||1)>1&&A===1&&v.unshift(S)}return v}}n.BroadcastUtil=l,n.arrayCopyHelper=function(g,m,b,_,v){if(_<0||_>=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(_+v>m.length)throw new Error("source indices to be copied are outside bounds");if(b+v>g.length)throw new Error("target array is too small to hold result");for(let w=0;wp.default.isLong(b)?b.toNumber():b)}static tensorValueTypeFromProto(m){return{tensorType:o.tensorDataTypeFromProto(m.elemType),shape:{dims:o.tensorDimsFromProto(m.shape.dim.map(b=>b.dimValue))}}}static tensorDimsFromORTFormat(m){const b=[];for(let _=0;_m.length)throw new Error(`invalid dimension of ${b} for sizeFromDimension as Tensor has ${m.length} dimensions.`);return e.getSizeFromDimensionRange(m,b,m.length)}static sizeToDimension(m,b){if(b<0||b>m.length)throw new Error(`invalid dimension of ${b} for sizeToDimension as Tensor has ${m.length} dimensions.`);return e.getSizeFromDimensionRange(m,0,b)}static getSizeFromDimensionRange(m,b,_){let v=1;for(let w=b;w<_;w++){if(m[w]<=0)throw new Error("cannot get valid size from specified dimension range. 
Most likely the range contains 0 or negative values in them.");v*=m[w]}return v}static computeStrides(m){const b=m.length;if(b===0)return[];if(b===1)return[1];const _=new Array(b);_[b-1]=1,_[b-2]=m[b-1];for(let v=b-3;v>=0;--v)_[v]=_[v+1]*m[v+1];return _}static transpose(m){return m.slice().reverse()}static indicesToOffset(m,b,_){_===void 0&&(_=m.length);let v=0;for(let w=0;w<_;++w)v+=b[w]*m[w];return v}static offsetToIndices(m,b){const _=b.length;if(_===0)return[];if(_===1)return[m*b[0]];const v=new Array(b.length);for(let w=0;w=b)throw new Error("unsupported axis for this operation.");return m<0?m+b:m}static normalizeAxes(m,b){return m.map(_=>this.normalizeAxis(_,b))}static incrementIndex(m,b,_){if(b.length===0||m.length===0)throw new Error("Index incrementing unsupported for scalar Tensor");if(_===void 0)_=b.length;else if(_<=0||_>b.length)throw new Error("Incorrect axis to increment on");for(let v=_-1;v>=0&&(m[v]++,!(m[v]=m.length)throw new Error("the dimension with value zero exceeds the dimension size of the input tensor");v[O]=m[O]}else v[O]=b[O];S*=v[O]}}const A=e.size(m);if(w!==-1){if(A%S!=0)throw new Error(`the input tensor cannot be reshaped to the requested shape. Input shape: [${m}] Output shape: [${b}]`);v[w]=A/S}else if(S!==A)throw new Error("reshapedDims and originalDims don't have matching sizes");return v}static sortBasedOnPerm(m,b){return b?b.map(_=>m[_]):m.slice().reverse()}static padShape(m,b){const _=m.length;return m.map((v,w)=>v+b[w]+b[w+_])}static areEqual(m,b){return m.length===b.length&&m.every((_,v)=>_===b[v])}static validateDimsAndCalcSize(m){if(m.length>6)throw new TypeError("Only rank 0 to 6 is supported for tensor shape.");let b=1;for(const _ of m){if(!Number.isInteger(_))throw new TypeError(`Invalid shape: ${_} is not an integer`);if(_<0||_>2147483647)throw new TypeError(`Invalid shape: length ${_} is not allowed`);b*=_}return b}static flattenShape(m,b){b<0&&(b+=m.length);const _=m.reduce((w,S)=>w*S,1),v=m.slice(b).reduce((w,S)=>w*S,1);return[_/v,v]}static squeezeShape(m,b){const _=new Array;b=e.normalizeAxes(b,m.length);for(let v=0;v=0;if(w&&m[v]!==1)throw new Error("squeeze an axis of size different than 1");(b.length===0&&m[v]>1||b.length>0&&!w)&&_.push(m[v])}return _}static unsqueezeShape(m,b){const _=new Array(m.length+b.length);_.fill(0);for(let w=0;w=_.length)throw new Error("'axes' has an out of range axis");if(_[S]!==0)throw new Error("'axes' has a duplicate axis");_[S]=1}let v=0;for(let w=0;w<_.length;w++)_[w]===0&&(_[w]=m[v++]);if(v!==m.length)throw new Error("the unsqueezed dimension could not be established");return _}}n.ShapeUtil=e,n.MathUtil=class{static sqr(g,m,b,_,v){if(_<0||_>=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(_+v>m.length)throw new Error("source indices to be copied are outside bounds");if(b+v>g.length)throw new Error("target array is too small to hold result");for(let w=0;w=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(_+v>m.length)throw new Error("source indices to be copied are outside bounds");if(b+v>g.length)throw new Error("target array is too small to hold result");for(let S=0;S=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(_+v>m.length)throw new Error("source indices to be copied are outside bounds");if(b+v>g.length)throw new Error("target array is too small to hold result");for(let 
S=0;S=m.length)throw new Error("sourceIndex out of bounds");if(b<0||b>=g.length)throw new Error("targetIndex out of bounds");if(_+v>m.length)throw new Error("source indices to be copied are outside bounds");if(b+v>g.length)throw new Error("target array is too small to hold result");for(let w=0;wb.push(F));const A=i.calcReduceShape(S,b,!0),O=e.size(A),x=new h.Tensor(A,m.type),I=e.computeStrides(A),N=e.computeStrides(S),B=new Array(S.length);for(let L=0;L=b.length)return S(m[w]);const x=b[v],I=x>=_.length?1:e.size(_.slice(x+1));for(let N=0;N<_[x];N++)O=N===0?i.calcReduceByAxis(m,b,_,v+1,w,S,A):A(O,i.calcReduceByAxis(m,b,_,v+1,w,S,A)),w+=I;return O}static calcReduceShape(m,b,_){const v=m.slice();for(let w=0;ww!==0)}}n.ReduceUtil=i;class d{static adjustPoolAttributes(m,b,_,v,w,S){if(!m&&_.length!==b.length-2)throw new Error("length of specified kernel shapes should be 2 less than length of input dimensions");if(m)for(let A=0;A=_.length?_.push(b[A+2]):_[A]=b[A+2];for(let A=0;A<_.length;A++)if(A=_[A]||S[A+_.length]>=_[A])throw new Error("pads should be smaller than kernel")}}static adjustPadsBasedOnAutoPad(m,b,_,v,w,S){if(S){if(w.length!==2*(m.length-2))throw new Error("length of pads should be twice the length of data dimensions");if(b.length!==m.length-2)throw new Error("length of strides should be the length of data dimensions");if(v.length!==m.length-2)throw new Error("length of kernel shapes should be the length of data dimensions");for(let A=0;A{Object.defineProperty(n,"__esModule",{value:!0}),n.iterateExtraOptions=void 0,n.iterateExtraOptions=(a,u,c,p)=>{if(typeof a=="object"&&a!==null){if(c.has(a))throw new Error("Circular reference in options");c.add(a)}Object.entries(a).forEach(([s,h])=>{const f=u?u+s:s;if(typeof h=="object")(0,n.iterateExtraOptions)(h,f+".",c,p);else if(typeof h=="string"||typeof h=="number")p(f,h.toString());else{if(typeof h!="boolean")throw new Error("Can't handle extra config type: "+typeof h);p(f,h?"1":"0")}})}},2157:function(y,n,a){var u,c=this&&this.__createBinding||(Object.create?function(I,N,B,L){L===void 0&&(L=B);var F=Object.getOwnPropertyDescriptor(N,B);F&&!("get"in F?!N.__esModule:F.writable||F.configurable)||(F={enumerable:!0,get:function(){return N[B]}}),Object.defineProperty(I,L,F)}:function(I,N,B,L){L===void 0&&(L=B),I[L]=N[B]}),p=this&&this.__setModuleDefault||(Object.create?function(I,N){Object.defineProperty(I,"default",{enumerable:!0,value:N})}:function(I,N){I.default=N}),s=this&&this.__importStar||function(I){if(I&&I.__esModule)return I;var N={};if(I!=null)for(var B in I)B!=="default"&&Object.prototype.hasOwnProperty.call(I,B)&&c(N,I,B);return p(N,I),N};Object.defineProperty(n,"__esModule",{value:!0}),n.endProfiling=n.run=n.releaseSession=n.createSession=n.createSessionFinalize=n.createSessionAllocate=n.initOrt=n.initWasm=void 0;const h=a(1670),f=s(a(349)),l=a(6361),o=()=>!!h.env.wasm.proxy&&typeof document<"u";let t,e,r,i=!1,d=!1,g=!1;const m=[],b=[],_=[],v=[],w=[],S=[],A=()=>{if(i||!d||g||!t)throw new Error("worker not 
ready")},O=I=>{switch(I.data.type){case"init-wasm":i=!1,I.data.err?(g=!0,e[1](I.data.err)):(d=!0,e[0]());break;case"init-ort":I.data.err?r[1](I.data.err):r[0]();break;case"create_allocate":I.data.err?m.shift()[1](I.data.err):m.shift()[0](I.data.out);break;case"create_finalize":I.data.err?b.shift()[1](I.data.err):b.shift()[0](I.data.out);break;case"create":I.data.err?_.shift()[1](I.data.err):_.shift()[0](I.data.out);break;case"release":I.data.err?v.shift()[1](I.data.err):v.shift()[0]();break;case"run":I.data.err?w.shift()[1](I.data.err):w.shift()[0](I.data.out);break;case"end-profiling":I.data.err?S.shift()[1](I.data.err):S.shift()[0]()}},x=typeof document<"u"?(u=document==null?void 0:document.currentScript)===null||u===void 0?void 0:u.src:void 0;n.initWasm=async()=>{if(o()){if(d)return;if(i)throw new Error("multiple calls to 'initWasm()' detected.");if(g)throw new Error("previous call to 'initWasm()' failed.");return i=!0,h.env.wasm.wasmPaths===void 0&&x&&x.indexOf("blob:")!==0&&(h.env.wasm.wasmPaths=x.substr(0,+x.lastIndexOf("/")+1)),new Promise((I,N)=>{t==null||t.terminate(),t=a(9710).Z(),t.onmessage=O,e=[I,N];const B={type:"init-wasm",in:h.env.wasm};t.postMessage(B)})}return(0,l.initializeWebAssembly)(h.env.wasm)},n.initOrt=async(I,N)=>{if(o())return A(),new Promise((B,L)=>{r=[B,L];const F={type:"init-ort",in:{numThreads:I,loggingLevel:N}};t.postMessage(F)});f.initOrt(I,N)},n.createSessionAllocate=async I=>o()?(A(),new Promise((N,B)=>{m.push([N,B]);const L={type:"create_allocate",in:{model:I}};t.postMessage(L,[I.buffer])})):f.createSessionAllocate(I),n.createSessionFinalize=async(I,N)=>o()?(A(),new Promise((B,L)=>{b.push([B,L]);const F={type:"create_finalize",in:{modeldata:I,options:N}};t.postMessage(F)})):f.createSessionFinalize(I,N),n.createSession=async(I,N)=>o()?(A(),new Promise((B,L)=>{_.push([B,L]);const F={type:"create",in:{model:I,options:N}};t.postMessage(F,[I.buffer])})):f.createSession(I,N),n.releaseSession=async I=>{if(o())return A(),new Promise((N,B)=>{v.push([N,B]);const L={type:"release",in:I};t.postMessage(L)});f.releaseSession(I)},n.run=async(I,N,B,L,F)=>o()?(A(),new Promise((H,D)=>{w.push([H,D]);const j={type:"run",in:{sessionId:I,inputIndices:N,inputs:B,outputIndices:L,options:F}};t.postMessage(j,f.extractTransferableBuffers(B))})):f.run(I,N,B,L,F),n.endProfiling=async I=>{if(o())return A(),new Promise((N,B)=>{S.push([N,B]);const L={type:"end-profiling",in:I};t.postMessage(L)});f.endProfiling(I)}},586:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.setRunOptions=void 0;const u=a(7967),c=a(4983),p=a(6361);n.setRunOptions=s=>{const h=(0,p.getInstance)();let f=0;const l=[],o=s||{};try{if((s==null?void 0:s.logSeverityLevel)===void 0)o.logSeverityLevel=2;else if(typeof s.logSeverityLevel!="number"||!Number.isInteger(s.logSeverityLevel)||s.logSeverityLevel<0||s.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${s.logSeverityLevel}`);if((s==null?void 0:s.logVerbosityLevel)===void 0)o.logVerbosityLevel=0;else if(typeof s.logVerbosityLevel!="number"||!Number.isInteger(s.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${s.logVerbosityLevel}`);(s==null?void 0:s.terminate)===void 0&&(o.terminate=!1);let t=0;if((s==null?void 0:s.tag)!==void 0&&(t=(0,c.allocWasmString)(s.tag,l)),f=h._OrtCreateRunOptions(o.logSeverityLevel,o.logVerbosityLevel,!!o.terminate,t),f===0)throw new Error("Can't create run options");return(s==null?void 0:s.extra)!==void 0&&(0,u.iterateExtraOptions)(s.extra,"",new WeakSet,(e,r)=>{const 
i=(0,c.allocWasmString)(e,l),d=(0,c.allocWasmString)(r,l);if(h._OrtAddRunConfigEntry(f,i,d)!==0)throw new Error(`Can't set a run config entry: ${e} - ${r}`)}),[f,l]}catch(t){throw f!==0&&h._OrtReleaseRunOptions(f),l.forEach(h._free),t}}},2306:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.OnnxruntimeWebAssemblySessionHandler=void 0;const u=a(2806),c=a(1670),p=a(2850),s=a(2157);let h;n.OnnxruntimeWebAssemblySessionHandler=class{async createSessionAllocate(f){const l=await fetch(f),o=await l.arrayBuffer();return(0,s.createSessionAllocate)(new Uint8Array(o))}async loadModel(f,l){if(h||(await(0,s.initOrt)(c.env.wasm.numThreads,(o=>{switch(o){case"verbose":return 0;case"info":return 1;case"warning":return 2;case"error":return 3;case"fatal":return 4;default:throw new Error(`unsupported logging level: ${o}`)}})(c.env.logLevel)),h=!0),typeof f=="string")if(typeof fetch>"u"){const o=await(0,p.promisify)(u.readFile)(f);[this.sessionId,this.inputNames,this.outputNames]=await(0,s.createSession)(o,l)}else{const o=await this.createSessionAllocate(f);[this.sessionId,this.inputNames,this.outputNames]=await(0,s.createSessionFinalize)(o,l)}else[this.sessionId,this.inputNames,this.outputNames]=await(0,s.createSession)(f,l)}async dispose(){return(0,s.releaseSession)(this.sessionId)}async run(f,l,o){const t=[],e=[];Object.entries(f).forEach(g=>{const m=g[0],b=g[1],_=this.inputNames.indexOf(m);if(_===-1)throw new Error(`invalid input '${m}'`);t.push(b),e.push(_)});const r=[];Object.entries(l).forEach(g=>{const m=g[0],b=this.outputNames.indexOf(m);if(b===-1)throw new Error(`invalid output '${m}'`);r.push(b)});const i=await(0,s.run)(this.sessionId,e,t.map(g=>[g.type,g.dims,g.data]),r,o),d={};for(let g=0;g{Object.defineProperty(n,"__esModule",{value:!0}),n.setSessionOptions=void 0;const u=a(7967),c=a(4983),p=a(6361);n.setSessionOptions=s=>{const h=(0,p.getInstance)();let f=0;const l=[],o=s||{};(t=>{t.extra||(t.extra={}),t.extra.session||(t.extra.session={});const e=t.extra.session;e.use_ort_model_bytes_directly||(e.use_ort_model_bytes_directly="1")})(o);try{(s==null?void 0:s.graphOptimizationLevel)===void 0&&(o.graphOptimizationLevel="all");const t=(i=>{switch(i){case"disabled":return 0;case"basic":return 1;case"extended":return 2;case"all":return 99;default:throw new Error(`unsupported graph optimization level: ${i}`)}})(o.graphOptimizationLevel);(s==null?void 0:s.enableCpuMemArena)===void 0&&(o.enableCpuMemArena=!0),(s==null?void 0:s.enableMemPattern)===void 0&&(o.enableMemPattern=!0),(s==null?void 0:s.executionMode)===void 0&&(o.executionMode="sequential");const e=(i=>{switch(i){case"sequential":return 0;case"parallel":return 1;default:throw new Error(`unsupported execution mode: ${i}`)}})(o.executionMode);let r=0;if((s==null?void 0:s.logId)!==void 0&&(r=(0,c.allocWasmString)(s.logId,l)),(s==null?void 0:s.logSeverityLevel)===void 0)o.logSeverityLevel=2;else if(typeof s.logSeverityLevel!="number"||!Number.isInteger(s.logSeverityLevel)||s.logSeverityLevel<0||s.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${s.logSeverityLevel}`);if((s==null?void 0:s.logVerbosityLevel)===void 0)o.logVerbosityLevel=0;else if(typeof s.logVerbosityLevel!="number"||!Number.isInteger(s.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${s.logVerbosityLevel}`);if((s==null?void 0:s.enableProfiling)===void 0&&(o.enableProfiling=!1),f=h._OrtCreateSessionOptions(t,!!o.enableCpuMemArena,!!o.enableMemPattern,e,!!o.enableProfiling,0,r,o.logSeverityLevel,o.logVerbosityLevel),f===0)throw 
new Error("Can't create session options");return s!=null&&s.executionProviders&&((i,d,g)=>{for(const m of d){let b=typeof m=="string"?m:m.name;switch(b){case"xnnpack":b="XNNPACK";break;case"wasm":case"cpu":continue;default:throw new Error(`not supported EP: ${b}`)}const _=(0,c.allocWasmString)(b,g);if((0,p.getInstance)()._OrtAppendExecutionProvider(i,_)!==0)throw new Error(`Can't append execution provider: ${b}`)}})(f,s.executionProviders,l),(s==null?void 0:s.extra)!==void 0&&(0,u.iterateExtraOptions)(s.extra,"",new WeakSet,(i,d)=>{const g=(0,c.allocWasmString)(i,l),m=(0,c.allocWasmString)(d,l);if(h._OrtAddSessionConfigEntry(f,g,m)!==0)throw new Error(`Can't set a session config entry: ${i} - ${d}`)}),[f,l]}catch(t){throw f!==0&&h._OrtReleaseSessionOptions(f),l.forEach(h._free),t}}},4983:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.allocWasmString=void 0;const u=a(6361);n.allocWasmString=(c,p)=>{const s=(0,u.getInstance)(),h=s.lengthBytesUTF8(c)+1,f=s._malloc(h);return s.stringToUTF8(c,f,h),p.push(f),f}},349:(y,n,a)=>{Object.defineProperty(n,"__esModule",{value:!0}),n.extractTransferableBuffers=n.endProfiling=n.run=n.releaseSession=n.createSession=n.createSessionFinalize=n.createSessionAllocate=n.initOrt=void 0;const u=a(586),c=a(4919),p=a(4983),s=a(6361);n.initOrt=(t,e)=>{const r=(0,s.getInstance)()._OrtInit(t,e);if(r!==0)throw new Error(`Can't initialize onnxruntime. error code = ${r}`)};const h=new Map;n.createSessionAllocate=t=>{const e=(0,s.getInstance)(),r=e._malloc(t.byteLength);return e.HEAPU8.set(t,r),[r,t.byteLength]},n.createSessionFinalize=(t,e)=>{const r=(0,s.getInstance)();let i=0,d=0,g=[];try{if([d,g]=(0,c.setSessionOptions)(e),i=r._OrtCreateSession(t[0],t[1],d),i===0)throw new Error("Can't create a session")}finally{r._free(t[0]),r._OrtReleaseSessionOptions(d),g.forEach(r._free)}const m=r._OrtGetInputCount(i),b=r._OrtGetOutputCount(i),_=[],v=[],w=[],S=[];for(let A=0;A{const r=(0,n.createSessionAllocate)(t);return(0,n.createSessionFinalize)(r,e)},n.releaseSession=t=>{const e=(0,s.getInstance)(),r=h.get(t);if(!r)throw new Error("invalid session id");const i=r[0],d=r[1],g=r[2];d.forEach(e._OrtFree),g.forEach(e._OrtFree),e._OrtReleaseSession(i),h.delete(t)};const f=t=>{switch(t){case"int8":return 3;case"uint8":return 2;case"bool":return 9;case"int16":return 5;case"uint16":return 4;case"int32":return 6;case"uint32":return 12;case"float32":return 1;case"float64":return 11;case"string":return 8;case"int64":return 7;case"uint64":return 13;default:throw new Error(`unsupported data type: ${t}`)}},l=t=>{switch(t){case 3:return"int8";case 2:return"uint8";case 9:return"bool";case 5:return"int16";case 4:return"uint16";case 6:return"int32";case 12:return"uint32";case 1:return"float32";case 11:return"float64";case 8:return"string";case 7:return"int64";case 13:return"uint64";default:throw new Error(`unsupported data type: ${t}`)}},o=t=>{switch(t){case"float32":return Float32Array;case"uint8":case"bool":return Uint8Array;case"int8":return Int8Array;case"uint16":return Uint16Array;case"int16":return Int16Array;case"int32":return Int32Array;case"float64":return Float64Array;case"uint32":return Uint32Array;case"int64":return BigInt64Array;case"uint64":return BigUint64Array;default:throw new Error(`unsupported type: ${t}`)}};n.run=(t,e,r,i,d)=>{const g=(0,s.getInstance)(),m=h.get(t);if(!m)throw new Error("invalid session id");const b=m[0],_=m[1],v=m[2],w=e.length,S=i.length;let A=0,O=[];const x=[],I=[];try{[A,O]=(0,u.setRunOptions)(d);for(let D=0;Dg.HEAP32[ve++]=_e);const 
oe=g._OrtCreateTensor(f(j),J,ee,Ae,Z.length);if(oe===0)throw new Error("Can't create a tensor");x.push(oe)}finally{g.stackRestore(ue)}}const N=g.stackSave(),B=g.stackAlloc(4*w),L=g.stackAlloc(4*w),F=g.stackAlloc(4*S),H=g.stackAlloc(4*S);try{let D=B/4,j=L/4,Z=F/4,X=H/4;for(let ue=0;ueOe*Be);if(_e=l(Fe),_e==="string"){const Oe=[];let Be=be/4;for(let Ge=0;Ge{const e=(0,s.getInstance)(),r=h.get(t);if(!r)throw new Error("invalid session id");const i=r[0],d=e._OrtEndProfiling(i);if(d===0)throw new Error("Can't get an profile file name");e._OrtFree(d)},n.extractTransferableBuffers=t=>{const e=[];for(const r of t){const i=r[2];!Array.isArray(i)&&i.buffer&&e.push(i.buffer)}return e}},6361:function(y,n,a){var u=this&&this.__createBinding||(Object.create?function(d,g,m,b){b===void 0&&(b=m);var _=Object.getOwnPropertyDescriptor(g,m);_&&!("get"in _?!g.__esModule:_.writable||_.configurable)||(_={enumerable:!0,get:function(){return g[m]}}),Object.defineProperty(d,b,_)}:function(d,g,m,b){b===void 0&&(b=m),d[b]=g[m]}),c=this&&this.__setModuleDefault||(Object.create?function(d,g){Object.defineProperty(d,"default",{enumerable:!0,value:g})}:function(d,g){d.default=g}),p=this&&this.__importStar||function(d){if(d&&d.__esModule)return d;var g={};if(d!=null)for(var m in d)m!=="default"&&Object.prototype.hasOwnProperty.call(d,m)&&u(g,d,m);return c(g,d),g},s=this&&this.__importDefault||function(d){return d&&d.__esModule?d:{default:d}};Object.defineProperty(n,"__esModule",{value:!0}),n.dispose=n.getInstance=n.initializeWebAssembly=void 0;const h=p(a(6449)),f=s(a(932)),l=a(3474);let o,t=!1,e=!1,r=!1;const i=(d,g)=>g?d?"ort-wasm-simd-threaded.wasm":"ort-wasm-threaded.wasm":d?"ort-wasm-simd.wasm":"ort-wasm.wasm";n.initializeWebAssembly=async d=>{if(t)return Promise.resolve();if(e)throw new Error("multiple calls to 'initializeWebAssembly()' detected.");if(r)throw new Error("previous call to 'initializeWebAssembly()' failed.");e=!0;const g=d.initTimeout,m=d.numThreads,b=d.simd,_=m>1&&(()=>{try{return typeof SharedArrayBuffer<"u"&&(typeof MessageChannel<"u"&&new MessageChannel().port1.postMessage(new SharedArrayBuffer(1)),WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,5,4,1,3,1,1,10,11,1,9,0,65,0,254,16,2,0,26,11])))}catch{return!1}})(),v=b&&(()=>{try{return WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,10,30,1,28,0,65,0,253,15,253,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,253,186,1,26,11]))}catch{return!1}})(),w=typeof d.wasmPaths=="string"?d.wasmPaths:void 0,S=i(!1,_),A=i(v,_),O=typeof d.wasmPaths=="object"?d.wasmPaths[A]:void 0;let x=!1;const I=[];if(g>0&&I.push(new Promise(N=>{setTimeout(()=>{x=!0,N()},g)})),I.push(new Promise((N,B)=>{const L=_?l:f.default,F={locateFile:(H,D)=>_&&H.endsWith(".worker.js")&&typeof Blob<"u"?URL.createObjectURL(new Blob([a(4154)],{type:"text/javascript"})):H===S?O??(w??D)+A:D+H};if(_)if(typeof Blob>"u")F.mainScriptUrlOrBlob=h.join("/","ort-wasm-threaded.js");else{const H=`var ortWasmThreaded=(function(){var _scriptDir;return ${L.toString()}})();`;F.mainScriptUrlOrBlob=new Blob([H],{type:"text/javascript"})}L(F).then(H=>{e=!1,t=!0,o=H,N()},H=>{e=!1,r=!0,B(H)})})),await Promise.race(I),x)throw new Error(`WebAssembly backend initializing failed due to timeout: ${g}ms`)},n.getInstance=()=>{if(t&&o)return o;throw new Error("WebAssembly is not initialized yet.")},n.dispose=()=>{var d;!t||e||r||(e=!0,(d=o.PThread)===null||d===void 0||d.terminateAllThreads(),o=void 0,e=!1,t=!1,r=!0)}},9710:(y,n,a)=>{a.d(n,{Z:()=>p});var 
u=a(477),c=a.n(u);function p(){return c()('/*!\n* ONNX Runtime Web v1.14.0\n* Copyright (c) Microsoft Corporation. All rights reserved.\n* Licensed under the MIT License.\n*/\n(()=>{var t={474:(t,e,n)=>{var _scriptDir,r=(_scriptDir=(_scriptDir="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(t){function e(){return j.buffer!=D&&N(j.buffer),P}function r(){return j.buffer!=D&&N(j.buffer),U}function a(){return j.buffer!=D&&N(j.buffer),F}function i(){return j.buffer!=D&&N(j.buffer),I}function o(){return j.buffer!=D&&N(j.buffer),W}var u,c,s;t=t||{},u||(u=void 0!==t?t:{}),u.ready=new Promise((function(t,e){c=t,s=e}));var l,f,p,h,d,y,b=Object.assign({},u),m="./this.program",g=(t,e)=>{throw e},v="object"==typeof window,w="function"==typeof importScripts,_="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,O=u.ENVIRONMENT_IS_PTHREAD||!1,A="";function S(t){return u.locateFile?u.locateFile(t,A):A+t}if(_){let e;A=w?n(908).dirname(A)+"/":"//",y=()=>{d||(h=n(384),d=n(908))},l=function(t,e){return y(),t=d.normalize(t),h.readFileSync(t,e?void 0:"utf8")},p=t=>((t=l(t,!0)).buffer||(t=new Uint8Array(t)),t),f=(t,e,n)=>{y(),t=d.normalize(t),h.readFile(t,(function(t,r){t?n(t):e(r.buffer)}))},1{if(Q())throw process.exitCode=t,e;e instanceof ct||x("exiting due to exception: "+e),process.exit(t)},u.inspect=function(){return"[Emscripten Module object]"};try{e=n(925)}catch(t){throw console.error(\'The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?\'),t}n.g.Worker=e.Worker}else(v||w)&&(w?A=self.location.href:"undefined"!=typeof document&&document.currentScript&&(A=document.currentScript.src),_scriptDir&&(A=_scriptDir),A=0!==A.indexOf("blob:")?A.substr(0,A.replace(/[?#].*/,"").lastIndexOf("/")+1):"",_||(l=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.send(null),e.responseText},w&&(p=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.responseType="arraybuffer",e.send(null),new Uint8Array(e.response)}),f=(t,e,n)=>{var r=new XMLHttpRequest;r.open("GET",t,!0),r.responseType="arraybuffer",r.onload=()=>{200==r.status||0==r.status&&r.response?e(r.response):n()},r.onerror=n,r.send(null)}));_&&"undefined"==typeof performance&&(n.g.performance=n(953).performance);var T=console.log.bind(console),E=console.warn.bind(console);_&&(y(),T=t=>h.writeSync(1,t+"\\n"),E=t=>h.writeSync(2,t+"\\n"));var M,C=u.print||T,x=u.printErr||E;Object.assign(u,b),b=null,u.thisProgram&&(m=u.thisProgram),u.quit&&(g=u.quit),u.wasmBinary&&(M=u.wasmBinary);var R=u.noExitRuntime||!1;"object"!=typeof WebAssembly&&at("no native wasm support detected");var j,k,D,P,U,F,I,W,H=!1,L="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0;function z(t,e,n){var r=(e>>>=0)+n;for(n=e;t[n]&&!(n>=r);)++n;if(16(a=224==(240&a)?(15&a)<<12|i<<6|o:(7&a)<<18|i<<12|o<<6|63&t[e++])?r+=String.fromCharCode(a):(a-=65536,r+=String.fromCharCode(55296|a>>10,56320|1023&a))}}else r+=String.fromCharCode(a)}return r}function Y(t,e){return(t>>>=0)?z(r(),t,e):""}function B(t,e,n,r){if(!(0>>=0;r=n+r-1;for(var i=0;i=o&&(o=65536+((1023&o)<<10)|1023&t.charCodeAt(++i)),127>=o){if(n>=r)break;e[n++>>>0]=o}else{if(2047>=o){if(n+1>=r)break;e[n++>>>0]=192|o>>6}else{if(65535>=o){if(n+2>=r)break;e[n++>>>0]=224|o>>12}else{if(n+3>=r)break;e[n++>>>0]=240|o>>18,e[n++>>>0]=128|o>>12&63}e[n++>>>0]=128|o>>6&63}e[n++>>>0]=128|63&o}}return e[n>>>0]=0,n-a}function G(t){for(var 
e=0,n=0;n=r?e++:2047>=r?e+=2:55296<=r&&57343>=r?(e+=4,++n):e+=3}return e}function N(t){D=t,u.HEAP8=P=new Int8Array(t),u.HEAP16=new Int16Array(t),u.HEAP32=F=new Int32Array(t),u.HEAPU8=U=new Uint8Array(t),u.HEAPU16=new Uint16Array(t),u.HEAPU32=I=new Uint32Array(t),u.HEAPF32=new Float32Array(t),u.HEAPF64=W=new Float64Array(t)}O&&(D=u.buffer);var V=u.INITIAL_MEMORY||16777216;if(O)j=u.wasmMemory,D=u.buffer;else if(u.wasmMemory)j=u.wasmMemory;else if(!((j=new WebAssembly.Memory({initial:V/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw x("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),_&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");j&&(D=j.buffer),V=D.byteLength,N(D);var $,q=[],X=[],J=[],Z=[];function Q(){return R||!1}function K(){var t=u.preRun.shift();q.unshift(t)}var tt,et=0,nt=null,rt=null;function at(t){throw O?postMessage({cmd:"onAbort",arg:t}):u.onAbort&&u.onAbort(t),x(t="Aborted("+t+")"),H=!0,t=new WebAssembly.RuntimeError(t+". Build with -sASSERTIONS for more info."),s(t),t}function it(){return tt.startsWith("data:application/octet-stream;base64,")}function ot(){var t=tt;try{if(t==tt&&M)return new Uint8Array(M);if(p)return p(t);throw"both async and sync fetching of the wasm failed"}catch(t){at(t)}}tt="ort-wasm-threaded.wasm",it()||(tt=S(tt));var ut={};function ct(t){this.name="ExitStatus",this.message="Program terminated with exit("+t+")",this.status=t}function st(t){(t=ht.Vb[t])||at(),ht.mc(t)}function lt(t){var e=ht.Cc();if(!e)return 6;ht.ac.push(e),ht.Vb[t.Ub]=e,e.Ub=t.Ub;var n={cmd:"run",start_routine:t.Ic,arg:t.zc,pthread_ptr:t.Ub};return e.$b=()=>{n.time=performance.now(),e.postMessage(n,t.Nc)},e.loaded&&(e.$b(),delete e.$b),0}function ft(t){if(O)return $t(1,1,t);Q()||(ht.oc(),u.onExit&&u.onExit(t),H=!0),g(t,new ct(t))}function pt(t,e){if(!e&&O)throw bt(t),"unwind";Q()||O||(me(),dt(J),be(0),re[1].length&&ae(1,10),re[2].length&&ae(2,10),ht.oc()),ft(t)}var ht={Yb:[],ac:[],qc:[],Vb:{},fc:function(){O&&ht.Ec()},Pc:function(){},Ec:function(){ht.receiveObjectTransfer=ht.Gc,ht.threadInitTLS=ht.pc,ht.setExitStatus=ht.nc,R=!1},nc:function(){},oc:function(){for(var t of Object.values(ht.Vb))ht.mc(t);for(t of ht.Yb)t.terminate();ht.Yb=[]},mc:function(t){var e=t.Ub;delete ht.Vb[e],ht.Yb.push(t),ht.ac.splice(ht.ac.indexOf(t),1),t.Ub=0,Oe(e)},Gc:function(){},pc:function(){ht.qc.forEach((t=>t()))},Fc:function(t,e){t.onmessage=n=>{var r=(n=n.data).cmd;if(t.Ub&&(ht.Bc=t.Ub),n.targetThread&&n.targetThread!=he()){var a=ht.Vb[n.Qc];a?a.postMessage(n,n.transferList):x(\'Internal error! 
Worker sent a message "\'+r+\'" to target pthread \'+n.targetThread+", but that thread no longer exists!")}else"processProxyingQueue"===r?zt(n.queue):"spawnThread"===r?lt(n):"cleanupThread"===r?st(n.thread):"killThread"===r?(n=n.thread,r=ht.Vb[n],delete ht.Vb[n],r.terminate(),Oe(n),ht.ac.splice(ht.ac.indexOf(r),1),r.Ub=0):"cancelThread"===r?ht.Vb[n.thread].postMessage({cmd:"cancel"}):"loaded"===r?(t.loaded=!0,e&&e(t),t.$b&&(t.$b(),delete t.$b)):"print"===r?C("Thread "+n.threadId+": "+n.text):"printErr"===r?x("Thread "+n.threadId+": "+n.text):"alert"===r?alert("Thread "+n.threadId+": "+n.text):"setimmediate"===n.target?t.postMessage(n):"onAbort"===r?u.onAbort&&u.onAbort(n.arg):r&&x("worker sent an unknown command "+r);ht.Bc=void 0},t.onerror=t=>{throw x("worker sent an error! "+t.filename+":"+t.lineno+": "+t.message),t},_&&(t.on("message",(function(e){t.onmessage({data:e})})),t.on("error",(function(e){t.onerror(e)})),t.on("detachedExit",(function(){}))),t.postMessage({cmd:"load",urlOrBlob:u.mainScriptUrlOrBlob||_scriptDir,wasmMemory:j,wasmModule:k})},yc:function(){var t=S("ort-wasm-threaded.worker.js");ht.Yb.push(new Worker(t))},Cc:function(){return 0==ht.Yb.length&&(ht.yc(),ht.Fc(ht.Yb[0])),ht.Yb.pop()}};function dt(t){for(;0>2>>>0];t=a()[t+48>>2>>>0],Te(e,e-t),Me(e)};var mt=[];function gt(t){var e=mt[t];return e||(t>=mt.length&&(mt.length=t+1),mt[t]=e=$.get(t)),e}u.invokeEntryPoint=function(t,e){t=gt(t)(e),Q()?ht.nc(t):Ae(t)};var vt,wt,_t=[],Ot=0,At=0;function St(t){this.Zb=t,this.Sb=t-24,this.xc=function(t){i()[this.Sb+4>>2>>>0]=t},this.bc=function(){return i()[this.Sb+4>>2>>>0]},this.wc=function(t){i()[this.Sb+8>>2>>>0]=t},this.Dc=function(){return i()[this.Sb+8>>2>>>0]},this.rc=function(){a()[this.Sb>>2>>>0]=0},this.hc=function(t){t=t?1:0,e()[this.Sb+12>>0>>>0]=t},this.uc=function(){return 0!=e()[this.Sb+12>>0>>>0]},this.ic=function(t){t=t?1:0,e()[this.Sb+13>>0>>>0]=t},this.kc=function(){return 0!=e()[this.Sb+13>>0>>>0]},this.fc=function(t,e){this.cc(0),this.xc(t),this.wc(e),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(a(),this.Sb>>2,1)},this.Hc=function(){return 1===Atomics.sub(a(),this.Sb>>2,1)},this.cc=function(t){i()[this.Sb+16>>2>>>0]=t},this.tc=function(){return i()[this.Sb+16>>2>>>0]},this.vc=function(){if(Re(this.bc()))return i()[this.Zb>>2>>>0];var t=this.tc();return 0!==t?t:this.Zb}}function Tt(t){return ye(new St(t).Sb)}function Et(t,e,n,r){return O?$t(3,1,t,e,n,r):Mt(t,e,n,r)}function Mt(t,e,n,r){if("undefined"==typeof SharedArrayBuffer)return x("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var a=[];return O&&0===a.length?Et(t,e,n,r):(t={Ic:n,Ub:t,zc:r,Nc:a},O?(t.Oc="spawnThread",postMessage(t,a),0):lt(t))}function Ct(t,e,n){return O?$t(4,1,t,e,n):0}function xt(t,e){if(O)return $t(5,1,t,e)}function Rt(t,e){if(O)return $t(6,1,t,e)}function jt(t,e,n){if(O)return $t(7,1,t,e,n)}function kt(t,e,n){return O?$t(8,1,t,e,n):0}function Dt(t,e){if(O)return $t(9,1,t,e)}function Pt(t,e,n){if(O)return $t(10,1,t,e,n)}function Ut(t,e,n,r){if(O)return $t(11,1,t,e,n,r)}function Ft(t,e,n,r){if(O)return $t(12,1,t,e,n,r)}function It(t,e,n,r){if(O)return $t(13,1,t,e,n,r)}function Wt(t){if(O)return $t(14,1,t)}function Ht(t,e){if(O)return $t(15,1,t,e)}function Lt(t,e,n){if(O)return $t(16,1,t,e,n)}function zt(t){Atomics.store(a(),t>>2,1),he()&&_e(t),Atomics.compareExchange(a(),t>>2,1,0)}function Yt(t){return i()[t>>>2]+4294967296*a()[t+4>>>2]}function Bt(t,e,n,r,a,i){return O?$t(17,1,t,e,n,r,a,i):-52}function 
Gt(t,e,n,r,a,i){if(O)return $t(18,1,t,e,n,r,a,i)}function Nt(t){var n=G(t)+1,r=de(n);return r&&B(t,e(),r,n),r}function Vt(t,e,n){function r(t){return(t=t.toTimeString().match(/\\(([A-Za-z ]+)\\)$/))?t[1]:"GMT"}if(O)return $t(19,1,t,e,n);var o=(new Date).getFullYear(),u=new Date(o,0,1),c=new Date(o,6,1);o=u.getTimezoneOffset();var s=c.getTimezoneOffset(),l=Math.max(o,s);a()[t>>2>>>0]=60*l,a()[e>>2>>>0]=Number(o!=s),t=r(u),e=r(c),t=Nt(t),e=Nt(e),s>2>>>0]=t,i()[n+4>>2>>>0]=e):(i()[n>>2>>>0]=e,i()[n+4>>2>>>0]=t)}function $t(t,e){var n=arguments.length-2,r=arguments;return yt((()=>{for(var a=Ce(8*n),i=a>>3,u=0;u>>0]=c}return we(t,n,a,e)}))}u.executeNotifiedProxyingQueue=zt,wt=_?()=>{var t=process.hrtime();return 1e3*t[0]+t[1]/1e6}:O?()=>performance.now()-u.__performance_now_clock_drift:()=>performance.now();var qt,Xt=[],Jt={};function Zt(){if(!qt){var t,e={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:m||"./this.program"};for(t in Jt)void 0===Jt[t]?delete e[t]:e[t]=Jt[t];var n=[];for(t in e)n.push(t+"="+e[t]);qt=n}return qt}function Qt(t,n){if(O)return $t(20,1,t,n);var r=0;return Zt().forEach((function(a,o){var u=n+r;for(o=i()[t+4*o>>2>>>0]=u,u=0;u>0>>>0]=a.charCodeAt(u);e()[o>>0>>>0]=0,r+=a.length+1})),0}function Kt(t,e){if(O)return $t(21,1,t,e);var n=Zt();i()[t>>2>>>0]=n.length;var r=0;return n.forEach((function(t){r+=t.length+1})),i()[e>>2>>>0]=r,0}function te(t){return O?$t(22,1,t):52}function ee(t,e,n,r){return O?$t(23,1,t,e,n,r):52}function ne(t,e,n,r,a){return O?$t(24,1,t,e,n,r,a):70}var re=[null,[],[]];function ae(t,e){var n=re[t];0===e||10===e?((1===t?C:x)(z(n,0)),n.length=0):n.push(e)}function ie(t,e,n,a){if(O)return $t(25,1,t,e,n,a);for(var o=0,u=0;u>2>>>0],s=i()[e+4>>2>>>0];e+=8;for(var l=0;l>>0]);o+=s}return i()[a>>2>>>0]=o,0}var oe=0;function ue(t){return 0==t%4&&(0!=t%100||0==t%400)}var ce=[31,29,31,30,31,30,31,31,30,31,30,31],se=[31,28,31,30,31,30,31,31,30,31,30,31];function le(t,n,r,i){function o(t,e,n){for(t="number"==typeof t?t.toString():t||"";t.lengtht?-1:0r-t.getDate())){t.setDate(t.getDate()+e);break}e-=r-t.getDate()+1,t.setDate(1),11>n?t.setMonth(n+1):(t.setMonth(0),t.setFullYear(t.getFullYear()+1))}return n=new Date(t.getFullYear()+1,0,4),e=s(new Date(t.getFullYear(),0,4)),n=s(n),0>=c(e,t)?0>=c(n,t)?t.getFullYear()+1:t.getFullYear():t.getFullYear()-1}var f=a()[i+40>>2>>>0];for(var p in i={Lc:a()[i>>2>>>0],Kc:a()[i+4>>2>>>0],dc:a()[i+8>>2>>>0],jc:a()[i+12>>2>>>0],ec:a()[i+16>>2>>>0],Xb:a()[i+20>>2>>>0],Tb:a()[i+24>>2>>>0],Wb:a()[i+28>>2>>>0],Rc:a()[i+32>>2>>>0],Jc:a()[i+36>>2>>>0],Mc:f?Y(f):""},r=Y(r),f={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})r=r.replace(new RegExp(p,"g"),f[p]);var h="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),d="January February March April May June July August September October November December".split(" ");for(p in f={"%a":function(t){return h[t.Tb].substring(0,3)},"%A":function(t){return h[t.Tb]},"%b":function(t){return d[t.ec].substring(0,3)},"%B":function(t){return d[t.ec]},"%C":function(t){return u((t.Xb+1900)/100|0,2)},"%d":function(t){return 
u(t.jc,2)},"%e":function(t){return o(t.jc,2," ")},"%g":function(t){return l(t).toString().substring(2)},"%G":function(t){return l(t)},"%H":function(t){return u(t.dc,2)},"%I":function(t){return 0==(t=t.dc)?t=12:12t.dc?"AM":"PM"},"%S":function(t){return u(t.Lc,2)},"%t":function(){return"\\t"},"%u":function(t){return t.Tb||7},"%U":function(t){return u(Math.floor((t.Wb+7-t.Tb)/7),2)},"%V":function(t){var e=Math.floor((t.Wb+7-(t.Tb+6)%7)/7);if(2>=(t.Tb+371-t.Wb-2)%7&&e++,e)53==e&&(4==(n=(t.Tb+371-t.Wb)%7)||3==n&&ue(t.Xb)||(e=1));else{e=52;var n=(t.Tb+7-t.Wb-1)%7;(4==n||5==n&&ue(t.Xb%400-1))&&e++}return u(e,2)},"%w":function(t){return t.Tb},"%W":function(t){return u(Math.floor((t.Wb+7-(t.Tb+6)%7)/7),2)},"%y":function(t){return(t.Xb+1900).toString().substring(2)},"%Y":function(t){return t.Xb+1900},"%z":function(t){var e=0<=(t=t.Jc);return t=Math.abs(t)/60,(e?"+":"-")+String("0000"+(t/60*100+t%60)).slice(-4)},"%Z":function(t){return t.Mc},"%%":function(){return"%"}},r=r.replace(/%%/g,"\\0\\0"),f)r.includes(p)&&(r=r.replace(new RegExp(p,"g"),f[p](i)));return p=function(t){var e=Array(G(t)+1);return B(t,e,0,e.length),e}(r=r.replace(/\\0\\0/g,"%")),p.length>n?0:(function(t,n){e().set(t,n>>>0)}(p,t),p.length-1)}ht.fc();var fe=[null,ft,bt,Et,Ct,xt,Rt,jt,kt,Dt,Pt,Ut,Ft,It,Wt,Ht,Lt,Bt,Gt,Vt,Qt,Kt,te,ee,ne,ie],pe={b:function(t){return de(t+24)+24},n:function(t){return(t=new St(t)).uc()||(t.hc(!0),Ot--),t.ic(!1),_t.push(t),t.sc(),t.vc()},ma:function(t){throw x("Unexpected exception thrown, this is not properly supported - aborting"),H=!0,t},x:function(){Se(0);var t=_t.pop();if(t.Hc()&&!t.kc()){var e=t.Dc();e&>(e)(t.Zb),Tt(t.Zb)}At=0},e:function(){var t=At;if(!t)return oe=0;var e=new St(t);e.cc(t);var n=e.bc();if(!n)return oe=0,t;for(var r=Array.prototype.slice.call(arguments),a=0;azt(r)));else if(O)postMessage({targetThread:t,cmd:"processProxyingQueue",queue:r});else{if(!(t=ht.Vb[t]))return;t.postMessage({cmd:"processProxyingQueue",queue:r})}return 1},Ea:function(){return-1},Pa:function(t,e){t=new Date(1e3*Yt(t)),a()[e>>2>>>0]=t.getUTCSeconds(),a()[e+4>>2>>>0]=t.getUTCMinutes(),a()[e+8>>2>>>0]=t.getUTCHours(),a()[e+12>>2>>>0]=t.getUTCDate(),a()[e+16>>2>>>0]=t.getUTCMonth(),a()[e+20>>2>>>0]=t.getUTCFullYear()-1900,a()[e+24>>2>>>0]=t.getUTCDay(),t=(t.getTime()-Date.UTC(t.getUTCFullYear(),0,1,0,0,0,0))/864e5|0,a()[e+28>>2>>>0]=t},Qa:function(t,e){t=new Date(1e3*Yt(t)),a()[e>>2>>>0]=t.getSeconds(),a()[e+4>>2>>>0]=t.getMinutes(),a()[e+8>>2>>>0]=t.getHours(),a()[e+12>>2>>>0]=t.getDate(),a()[e+16>>2>>>0]=t.getMonth(),a()[e+20>>2>>>0]=t.getFullYear()-1900,a()[e+24>>2>>>0]=t.getDay();var n=new Date(t.getFullYear(),0,1),r=(t.getTime()-n.getTime())/864e5|0;a()[e+28>>2>>>0]=r,a()[e+36>>2>>>0]=-60*t.getTimezoneOffset(),r=new Date(t.getFullYear(),6,1).getTimezoneOffset(),t=0|(r!=(n=n.getTimezoneOffset())&&t.getTimezoneOffset()==Math.min(n,r)),a()[e+32>>2>>>0]=t},Ra:function(t){var e=new Date(a()[t+20>>2>>>0]+1900,a()[t+16>>2>>>0],a()[t+12>>2>>>0],a()[t+8>>2>>>0],a()[t+4>>2>>>0],a()[t>>2>>>0],0),n=a()[t+32>>2>>>0],r=e.getTimezoneOffset(),i=new Date(e.getFullYear(),0,1),o=new Date(e.getFullYear(),6,1).getTimezoneOffset(),u=i.getTimezoneOffset(),c=Math.min(u,o);return 0>n?a()[t+32>>2>>>0]=Number(o!=u&&c==r):0>2>>>0]=e.getDay(),n=(e.getTime()-i.getTime())/864e5|0,a()[t+28>>2>>>0]=n,a()[t>>2>>>0]=e.getSeconds(),a()[t+4>>2>>>0]=e.getMinutes(),a()[t+8>>2>>>0]=e.getHours(),a()[t+12>>2>>>0]=e.getDate(),a()[t+16>>2>>>0]=e.getMonth(),e.getTime()/1e3|0},Aa:Bt,Ba:Gt,Sa:function 
t(e,n,r){t.Ac||(t.Ac=!0,Vt(e,n,r))},y:function(){at("")},U:function(){if(!_&&!w){var t="Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread";vt||(vt={}),vt[t]||(vt[t]=1,_&&(t="warning: "+t),x(t))}},ra:function(){return 4294901760},B:wt,Ia:function(t,e,n){r().copyWithin(t>>>0,e>>>0,e+n>>>0)},F:function(){return _?n(993).cpus().length:navigator.hardwareConcurrency},Da:function(t,e,n){Xt.length=e,n>>=3;for(var r=0;r>>0];return(0>t?ut[-t-1]:fe[t]).apply(null,Xt)},qa:function(t){var e=r().length;if((t>>>=0)<=e||4294901760=n;n*=2){var a=e*(1+.2/n);a=Math.min(a,t+100663296);var i=Math;a=Math.max(t,a),i=i.min.call(i,4294901760,a+(65536-a%65536)%65536);t:{try{j.grow(i-D.byteLength+65535>>>16),N(j.buffer);var o=1;break t}catch(t){}o=void 0}if(o)return!0}return!1},Na:function(){throw"unwind"},Ga:Qt,Ha:Kt,J:pt,I:te,S:ee,ga:ne,R:ie,d:function(){return oe},na:function t(r,a){t.lc||(t.lc=function(){if("object"==typeof crypto&&"function"==typeof crypto.getRandomValues){var t=new Uint8Array(1);return()=>(crypto.getRandomValues(t),t[0])}if(_)try{var e=n(Object(function(){var t=new Error("Cannot find module \'crypto\'");throw t.code="MODULE_NOT_FOUND",t}()));return()=>e.randomBytes(1)[0]}catch(t){}return()=>at("randomDevice")}());for(var i=0;i>0>>>0]=t.lc();return 0},ia:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},ja:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},K:function(t){var e=Ee();try{return gt(t)()}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},f:function(t,e){var n=Ee();try{return gt(t)(e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},P:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},Q:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},k:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},p:function(t,e,n,r){var a=Ee();try{return gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},q:function(t,e,n,r,a){var i=Ee();try{return gt(t)(e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},N:function(t,e,n,r,a,i){var o=Ee();try{return gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},s:function(t,e,n,r,a,i){var o=Ee();try{return gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},w:function(t,e,n,r,a,i,o){var u=Ee();try{return gt(t)(e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},L:function(t,e,n,r,a,i,o,u){var c=Ee();try{return gt(t)(e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},E:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=Ee();try{return gt(t)(e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(Me(p),t!==t+0)throw t;Se(1,0)}},aa:function(t,e,n,r,a,i,o,u){var c=Ee();try{return He(t,e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},_:function(t,e,n,r,a,i,o){var u=Ee();try{return ke(t,e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},Z:function(t,e,n,r,a){var i=Ee();try{return Le(t,e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},ca:function(t,e,n,r){var a=Ee();try{return Ie(t,e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},$:function(t){var e=Ee();try{return je(t)}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},ba:function(t,e){var n=Ee();try{return We(t,e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},Y:function(t,e,n){var r=Ee();try{return De(t,e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},g:function(t){var e=Ee();try{gt(t)()}catch(t){if(Me(e),t!==t+0)throw 
t;Se(1,0)}},r:function(t,e){var n=Ee();try{gt(t)(e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},i:function(t,e,n){var r=Ee();try{gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},ha:function(t,e,n,r){var a=Ee();try{gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},m:function(t,e,n,r){var a=Ee();try{gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},v:function(t,e,n,r,a){var i=Ee();try{gt(t)(e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},u:function(t,e,n,r,a,i){var o=Ee();try{gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},O:function(t,e,n,r,a,i,o){var u=Ee();try{gt(t)(e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},A:function(t,e,n,r,a,i,o,u){var c=Ee();try{gt(t)(e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},ka:function(t,e,n,r,a,i,o,u,c){var s=Ee();try{gt(t)(e,n,r,a,i,o,u,c)}catch(t){if(Me(s),t!==t+0)throw t;Se(1,0)}},C:function(t,e,n,r,a,i,o,u,c,s,l){var f=Ee();try{gt(t)(e,n,r,a,i,o,u,c,s,l)}catch(t){if(Me(f),t!==t+0)throw t;Se(1,0)}},D:function(t,e,n,r,a,i,o,u,c,s,l,f,p,h,d,y){var b=Ee();try{gt(t)(e,n,r,a,i,o,u,c,s,l,f,p,h,d,y)}catch(t){if(Me(b),t!==t+0)throw t;Se(1,0)}},fa:function(t,e,n,r,a,i,o,u){var c=Ee();try{Pe(t,e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},da:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=Ee();try{Fe(t,e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(Me(p),t!==t+0)throw t;Se(1,0)}},ea:function(t,e,n,r,a,i){var o=Ee();try{Ue(t,e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},o:function(t){return t},a:j||u.wasmMemory,G:function(t){oe=t},la:le,z:function(t,e,n,r){return le(t,e,n,r)}};!function(){function t(t,e){u.asm=t.exports,ht.qc.push(u.asm.sb),$=u.asm.ub,X.unshift(u.asm.Va),k=e,O||(et--,u.monitorRunDependencies&&u.monitorRunDependencies(et),0==et&&(null!==nt&&(clearInterval(nt),nt=null),rt&&(t=rt,rt=null,t())))}function e(e){t(e.instance,e.module)}function n(t){return function(){if(!M&&(v||w)){if("function"==typeof fetch&&!tt.startsWith("file://"))return fetch(tt,{credentials:"same-origin"}).then((function(t){if(!t.ok)throw"failed to load wasm binary file at \'"+tt+"\'";return t.arrayBuffer()})).catch((function(){return ot()}));if(f)return new Promise((function(t,e){f(tt,(function(e){t(new Uint8Array(e))}),e)}))}return Promise.resolve().then((function(){return ot()}))}().then((function(t){return WebAssembly.instantiate(t,r)})).then((function(t){return t})).then(t,(function(t){x("failed to asynchronously prepare wasm: "+t),at(t)}))}var r={a:pe};if(O||(et++,u.monitorRunDependencies&&u.monitorRunDependencies(et)),u.instantiateWasm)try{return u.instantiateWasm(r,t)}catch(t){return x("Module.instantiateWasm callback failed with error: "+t),!1}(M||"function"!=typeof WebAssembly.instantiateStreaming||it()||tt.startsWith("file://")||_||"function"!=typeof fetch?n(e):fetch(tt,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,r).then(e,(function(t){return x("wasm streaming compile failed: "+t),x("falling back to ArrayBuffer 
instantiation"),n(e)}))}))).catch(s)}(),u.___wasm_call_ctors=function(){return(u.___wasm_call_ctors=u.asm.Va).apply(null,arguments)},u._OrtInit=function(){return(u._OrtInit=u.asm.Wa).apply(null,arguments)},u._OrtCreateSessionOptions=function(){return(u._OrtCreateSessionOptions=u.asm.Xa).apply(null,arguments)},u._OrtAppendExecutionProvider=function(){return(u._OrtAppendExecutionProvider=u.asm.Ya).apply(null,arguments)},u._OrtAddSessionConfigEntry=function(){return(u._OrtAddSessionConfigEntry=u.asm.Za).apply(null,arguments)},u._OrtReleaseSessionOptions=function(){return(u._OrtReleaseSessionOptions=u.asm._a).apply(null,arguments)},u._OrtCreateSession=function(){return(u._OrtCreateSession=u.asm.$a).apply(null,arguments)},u._OrtReleaseSession=function(){return(u._OrtReleaseSession=u.asm.ab).apply(null,arguments)},u._OrtGetInputCount=function(){return(u._OrtGetInputCount=u.asm.bb).apply(null,arguments)},u._OrtGetOutputCount=function(){return(u._OrtGetOutputCount=u.asm.cb).apply(null,arguments)},u._OrtGetInputName=function(){return(u._OrtGetInputName=u.asm.db).apply(null,arguments)},u._OrtGetOutputName=function(){return(u._OrtGetOutputName=u.asm.eb).apply(null,arguments)},u._OrtFree=function(){return(u._OrtFree=u.asm.fb).apply(null,arguments)},u._OrtCreateTensor=function(){return(u._OrtCreateTensor=u.asm.gb).apply(null,arguments)},u._OrtGetTensorData=function(){return(u._OrtGetTensorData=u.asm.hb).apply(null,arguments)},u._OrtReleaseTensor=function(){return(u._OrtReleaseTensor=u.asm.ib).apply(null,arguments)},u._OrtCreateRunOptions=function(){return(u._OrtCreateRunOptions=u.asm.jb).apply(null,arguments)},u._OrtAddRunConfigEntry=function(){return(u._OrtAddRunConfigEntry=u.asm.kb).apply(null,arguments)},u._OrtReleaseRunOptions=function(){return(u._OrtReleaseRunOptions=u.asm.lb).apply(null,arguments)},u._OrtRun=function(){return(u._OrtRun=u.asm.mb).apply(null,arguments)},u._OrtEndProfiling=function(){return(u._OrtEndProfiling=u.asm.nb).apply(null,arguments)};var he=u._pthread_self=function(){return(he=u._pthread_self=u.asm.ob).apply(null,arguments)},de=u._malloc=function(){return(de=u._malloc=u.asm.pb).apply(null,arguments)},ye=u._free=function(){return(ye=u._free=u.asm.qb).apply(null,arguments)},be=u._fflush=function(){return(be=u._fflush=u.asm.rb).apply(null,arguments)};u.__emscripten_tls_init=function(){return(u.__emscripten_tls_init=u.asm.sb).apply(null,arguments)};var me=u.___funcs_on_exit=function(){return(me=u.___funcs_on_exit=u.asm.tb).apply(null,arguments)},ge=u.__emscripten_thread_init=function(){return(ge=u.__emscripten_thread_init=u.asm.vb).apply(null,arguments)};u.__emscripten_thread_crashed=function(){return(u.__emscripten_thread_crashed=u.asm.wb).apply(null,arguments)};var 
ve,we=u._emscripten_run_in_main_runtime_thread_js=function(){return(we=u._emscripten_run_in_main_runtime_thread_js=u.asm.xb).apply(null,arguments)},_e=u.__emscripten_proxy_execute_task_queue=function(){return(_e=u.__emscripten_proxy_execute_task_queue=u.asm.yb).apply(null,arguments)},Oe=u.__emscripten_thread_free_data=function(){return(Oe=u.__emscripten_thread_free_data=u.asm.zb).apply(null,arguments)},Ae=u.__emscripten_thread_exit=function(){return(Ae=u.__emscripten_thread_exit=u.asm.Ab).apply(null,arguments)},Se=u._setThrew=function(){return(Se=u._setThrew=u.asm.Bb).apply(null,arguments)},Te=u._emscripten_stack_set_limits=function(){return(Te=u._emscripten_stack_set_limits=u.asm.Cb).apply(null,arguments)},Ee=u.stackSave=function(){return(Ee=u.stackSave=u.asm.Db).apply(null,arguments)},Me=u.stackRestore=function(){return(Me=u.stackRestore=u.asm.Eb).apply(null,arguments)},Ce=u.stackAlloc=function(){return(Ce=u.stackAlloc=u.asm.Fb).apply(null,arguments)},xe=u.___cxa_can_catch=function(){return(xe=u.___cxa_can_catch=u.asm.Gb).apply(null,arguments)},Re=u.___cxa_is_pointer_type=function(){return(Re=u.___cxa_is_pointer_type=u.asm.Hb).apply(null,arguments)},je=u.dynCall_j=function(){return(je=u.dynCall_j=u.asm.Ib).apply(null,arguments)},ke=u.dynCall_iiiiij=function(){return(ke=u.dynCall_iiiiij=u.asm.Jb).apply(null,arguments)},De=u.dynCall_jii=function(){return(De=u.dynCall_jii=u.asm.Kb).apply(null,arguments)},Pe=u.dynCall_viiiiij=function(){return(Pe=u.dynCall_viiiiij=u.asm.Lb).apply(null,arguments)},Ue=u.dynCall_vjji=function(){return(Ue=u.dynCall_vjji=u.asm.Mb).apply(null,arguments)},Fe=u.dynCall_viiijjjii=function(){return(Fe=u.dynCall_viiijjjii=u.asm.Nb).apply(null,arguments)},Ie=u.dynCall_iij=function(){return(Ie=u.dynCall_iij=u.asm.Ob).apply(null,arguments)},We=u.dynCall_ji=function(){return(We=u.dynCall_ji=u.asm.Pb).apply(null,arguments)},He=u.dynCall_iiiiiij=function(){return(He=u.dynCall_iiiiiij=u.asm.Qb).apply(null,arguments)},Le=u.dynCall_iiij=function(){return(Le=u.dynCall_iiij=u.asm.Rb).apply(null,arguments)};function ze(){function t(){if(!ve&&(ve=!0,u.calledRun=!0,!H)&&(O||dt(X),c(u),u.onRuntimeInitialized&&u.onRuntimeInitialized(),!O)){if(u.postRun)for("function"==typeof u.postRun&&(u.postRun=[u.postRun]);u.postRun.length;){var t=u.postRun.shift();Z.unshift(t)}dt(Z)}}if(!(0{var _scriptDir,r=(_scriptDir=(_scriptDir="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(t){var e,r,a;t=t||{},e||(e=void 0!==t?t:{}),e.ready=new Promise((function(t,e){r=t,a=e}));var i,o,u,c,s,l,f=Object.assign({},e),p="./this.program",h=(t,e)=>{throw e},d="object"==typeof window,y="function"==typeof importScripts,b="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,m="";b?(m=y?n(908).dirname(m)+"/":"//",l=()=>{s||(c=n(384),s=n(908))},i=function(t,e){return l(),t=s.normalize(t),c.readFileSync(t,e?void 0:"utf8")},u=t=>((t=i(t,!0)).buffer||(t=new Uint8Array(t)),t),o=(t,e,n)=>{l(),t=s.normalize(t),c.readFile(t,(function(t,r){t?n(t):e(r.buffer)}))},1{if(_||0{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.send(null),e.responseText},y&&(u=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.responseType="arraybuffer",e.send(null),new Uint8Array(e.response)}),o=(t,e,n)=>{var r=new XMLHttpRequest;r.open("GET",t,!0),r.responseType="arraybuffer",r.onload=()=>{200==r.status||0==r.status&&r.response?e(r.response):n()},r.onerror=n,r.send(null)});var 
g,v=e.print||console.log.bind(console),w=e.printErr||console.warn.bind(console);Object.assign(e,f),f=null,e.thisProgram&&(p=e.thisProgram),e.quit&&(h=e.quit),e.wasmBinary&&(g=e.wasmBinary);var _=e.noExitRuntime||!1;"object"!=typeof WebAssembly&&V("no native wasm support detected");var O,A,S,T,E,M,C=!1,x="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0;function R(t,e,n){var r=(e>>>=0)+n;for(n=e;t[n]&&!(n>=r);)++n;if(16(a=224==(240&a)?(15&a)<<12|i<<6|o:(7&a)<<18|i<<12|o<<6|63&t[e++])?r+=String.fromCharCode(a):(a-=65536,r+=String.fromCharCode(55296|a>>10,56320|1023&a))}}else r+=String.fromCharCode(a)}return r}function j(t,e){return(t>>>=0)?R(T,t,e):""}function k(t,e,n,r){if(!(0>>=0;r=n+r-1;for(var i=0;i=o&&(o=65536+((1023&o)<<10)|1023&t.charCodeAt(++i)),127>=o){if(n>=r)break;e[n++>>>0]=o}else{if(2047>=o){if(n+1>=r)break;e[n++>>>0]=192|o>>6}else{if(65535>=o){if(n+2>=r)break;e[n++>>>0]=224|o>>12}else{if(n+3>=r)break;e[n++>>>0]=240|o>>18,e[n++>>>0]=128|o>>12&63}e[n++>>>0]=128|o>>6&63}e[n++>>>0]=128|63&o}}return e[n>>>0]=0,n-a}function D(t){for(var e=0,n=0;n=r?e++:2047>=r?e+=2:55296<=r&&57343>=r?(e+=4,++n):e+=3}return e}function P(){var t=O.buffer;A=t,e.HEAP8=S=new Int8Array(t),e.HEAP16=new Int16Array(t),e.HEAP32=E=new Int32Array(t),e.HEAPU8=T=new Uint8Array(t),e.HEAPU16=new Uint16Array(t),e.HEAPU32=M=new Uint32Array(t),e.HEAPF32=new Float32Array(t),e.HEAPF64=new Float64Array(t)}var U,F=[],I=[],W=[],H=[],L=0;function z(){var t=e.preRun.shift();F.unshift(t)}var Y,B=0,G=null,N=null;function V(t){throw e.onAbort&&e.onAbort(t),w(t="Aborted("+t+")"),C=!0,t=new WebAssembly.RuntimeError(t+". Build with -sASSERTIONS for more info."),a(t),t}function $(){return Y.startsWith("data:application/octet-stream;base64,")}if(Y="ort-wasm.wasm",!$()){var q=Y;Y=e.locateFile?e.locateFile(q,m):m+q}function X(){var t=Y;try{if(t==Y&&g)return new Uint8Array(g);if(u)return u(t);throw"both async and sync fetching of the wasm failed"}catch(t){V(t)}}function J(t){this.name="ExitStatus",this.message="Program terminated with exit("+t+")",this.status=t}function Z(t){for(;0>2>>>0]=t},this.Eb=function(){return M[this.zb+4>>2>>>0]},this.Sb=function(t){M[this.zb+8>>2>>>0]=t},this.Wb=function(){return M[this.zb+8>>2>>>0]},this.Tb=function(){E[this.zb>>2>>>0]=0},this.Ib=function(t){S[this.zb+12>>0>>>0]=t?1:0},this.Pb=function(){return 0!=S[this.zb+12>>0>>>0]},this.Jb=function(t){S[this.zb+13>>0>>>0]=t?1:0},this.Lb=function(){return 0!=S[this.zb+13>>0>>>0]},this.Rb=function(t,e){this.Fb(0),this.Ub(t),this.Sb(e),this.Tb(),this.Ib(!1),this.Jb(!1)},this.Nb=function(){E[this.zb>>2>>>0]+=1},this.Xb=function(){var t=E[this.zb>>2>>>0];return E[this.zb>>2>>>0]=t-1,1===t},this.Fb=function(t){M[this.zb+16>>2>>>0]=t},this.Ob=function(){return M[this.zb+16>>2>>>0]},this.Qb=function(){if(Mt(this.Eb()))return M[this.Db>>2>>>0];var t=this.Ob();return 0!==t?t:this.Db}}function nt(t){return vt(new et(t).zb)}var rt=[];function at(t){var e=rt[t];return e||(t>=rt.length&&(rt.length=t+1),rt[t]=e=U.get(t)),e}function it(t){var e=D(t)+1,n=gt(e);return n&&k(t,S,n,e),n}var ot={};function ut(){if(!ct){var t,e={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:p||"./this.program"};for(t in ot)void 0===ot[t]?delete e[t]:e[t]=ot[t];var n=[];for(t in e)n.push(t+"="+e[t]);ct=n}return ct}var ct,st=[null,[],[]];function lt(t,e){var n=st[t];0===e||10===e?((1===t?v:w)(R(n,0)),n.length=0):n.push(e)}var ft=0;function 
pt(t){return 0==t%4&&(0!=t%100||0==t%400)}var ht=[31,29,31,30,31,30,31,31,30,31,30,31],dt=[31,28,31,30,31,30,31,31,30,31,30,31];function yt(t,e,n,r){function a(t,e,n){for(t="number"==typeof t?t.toString():t||"";t.lengtht?-1:0r-t.getDate())){t.setDate(t.getDate()+e);break}e-=r-t.getDate()+1,t.setDate(1),11>n?t.setMonth(n+1):(t.setMonth(0),t.setFullYear(t.getFullYear()+1))}return n=new Date(t.getFullYear()+1,0,4),e=u(new Date(t.getFullYear(),0,4)),n=u(n),0>=o(e,t)?0>=o(n,t)?t.getFullYear()+1:t.getFullYear():t.getFullYear()-1}var s=E[r+40>>2>>>0];for(var l in r={$b:E[r>>2>>>0],Zb:E[r+4>>2>>>0],Gb:E[r+8>>2>>>0],Kb:E[r+12>>2>>>0],Hb:E[r+16>>2>>>0],Cb:E[r+20>>2>>>0],Ab:E[r+24>>2>>>0],Bb:E[r+28>>2>>>0],bc:E[r+32>>2>>>0],Yb:E[r+36>>2>>>0],ac:s?j(s):""},n=j(n),s={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})n=n.replace(new RegExp(l,"g"),s[l]);var f="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),p="January February March April May June July August September October November December".split(" ");for(l in s={"%a":function(t){return f[t.Ab].substring(0,3)},"%A":function(t){return f[t.Ab]},"%b":function(t){return p[t.Hb].substring(0,3)},"%B":function(t){return p[t.Hb]},"%C":function(t){return i((t.Cb+1900)/100|0,2)},"%d":function(t){return i(t.Kb,2)},"%e":function(t){return a(t.Kb,2," ")},"%g":function(t){return c(t).toString().substring(2)},"%G":function(t){return c(t)},"%H":function(t){return i(t.Gb,2)},"%I":function(t){return 0==(t=t.Gb)?t=12:12t.Gb?"AM":"PM"},"%S":function(t){return i(t.$b,2)},"%t":function(){return"\\t"},"%u":function(t){return t.Ab||7},"%U":function(t){return i(Math.floor((t.Bb+7-t.Ab)/7),2)},"%V":function(t){var e=Math.floor((t.Bb+7-(t.Ab+6)%7)/7);if(2>=(t.Ab+371-t.Bb-2)%7&&e++,e)53==e&&(4==(n=(t.Ab+371-t.Bb)%7)||3==n&&pt(t.Cb)||(e=1));else{e=52;var n=(t.Ab+7-t.Bb-1)%7;(4==n||5==n&&pt(t.Cb%400-1))&&e++}return i(e,2)},"%w":function(t){return t.Ab},"%W":function(t){return i(Math.floor((t.Bb+7-(t.Ab+6)%7)/7),2)},"%y":function(t){return(t.Cb+1900).toString().substring(2)},"%Y":function(t){return t.Cb+1900},"%z":function(t){var e=0<=(t=t.Yb);return t=Math.abs(t)/60,(e?"+":"-")+String("0000"+(t/60*100+t%60)).slice(-4)},"%Z":function(t){return t.ac},"%%":function(){return"%"}},n=n.replace(/%%/g,"\\0\\0"),s)n.includes(l)&&(n=n.replace(new RegExp(l,"g"),s[l](r)));return l=function(t){var e=Array(D(t)+1);return k(t,e,0,e.length),e}(n=n.replace(/\\0\\0/g,"%")),l.length>e?0:(S.set(l,t>>>0),l.length-1)}var bt={a:function(t){return gt(t+24)+24},m:function(t){return(t=new et(t)).Pb()||(t.Ib(!0),K--),t.Jb(!1),Q.push(t),t.Nb(),t.Qb()},ia:function(t){throw w("Unexpected exception thrown, this is not properly supported - aborting"),C=!0,t},w:function(){Ot(0);var t=Q.pop();if(t.Xb()&&!t.Lb()){var e=t.Wb();e&&at(e)(t.Db),nt(t.Db)}tt=0},d:function(){var t=tt;if(!t)return ft=0;var e=new et(t);e.Fb(t);var n=e.Eb();if(!n)return ft=0,t;for(var 
r=Array.prototype.slice.call(arguments),a=0;a>>2]+4294967296*E[t+4>>>2])),E[e>>2>>>0]=t.getUTCSeconds(),E[e+4>>2>>>0]=t.getUTCMinutes(),E[e+8>>2>>>0]=t.getUTCHours(),E[e+12>>2>>>0]=t.getUTCDate(),E[e+16>>2>>>0]=t.getUTCMonth(),E[e+20>>2>>>0]=t.getUTCFullYear()-1900,E[e+24>>2>>>0]=t.getUTCDay(),E[e+28>>2>>>0]=(t.getTime()-Date.UTC(t.getUTCFullYear(),0,1,0,0,0,0))/864e5|0},Ea:function(t,e){t=new Date(1e3*(M[t>>>2]+4294967296*E[t+4>>>2])),E[e>>2>>>0]=t.getSeconds(),E[e+4>>2>>>0]=t.getMinutes(),E[e+8>>2>>>0]=t.getHours(),E[e+12>>2>>>0]=t.getDate(),E[e+16>>2>>>0]=t.getMonth(),E[e+20>>2>>>0]=t.getFullYear()-1900,E[e+24>>2>>>0]=t.getDay();var n=new Date(t.getFullYear(),0,1);E[e+28>>2>>>0]=(t.getTime()-n.getTime())/864e5|0,E[e+36>>2>>>0]=-60*t.getTimezoneOffset();var r=new Date(t.getFullYear(),6,1).getTimezoneOffset();n=n.getTimezoneOffset(),E[e+32>>2>>>0]=0|(r!=n&&t.getTimezoneOffset()==Math.min(n,r))},Fa:function(t){var e=new Date(E[t+20>>2>>>0]+1900,E[t+16>>2>>>0],E[t+12>>2>>>0],E[t+8>>2>>>0],E[t+4>>2>>>0],E[t>>2>>>0],0),n=E[t+32>>2>>>0],r=e.getTimezoneOffset(),a=new Date(e.getFullYear(),0,1),i=new Date(e.getFullYear(),6,1).getTimezoneOffset(),o=a.getTimezoneOffset(),u=Math.min(o,i);return 0>n?E[t+32>>2>>>0]=Number(i!=o&&u==r):0>2>>>0]=e.getDay(),E[t+28>>2>>>0]=(e.getTime()-a.getTime())/864e5|0,E[t>>2>>>0]=e.getSeconds(),E[t+4>>2>>>0]=e.getMinutes(),E[t+8>>2>>>0]=e.getHours(),E[t+12>>2>>>0]=e.getDate(),E[t+16>>2>>>0]=e.getMonth(),e.getTime()/1e3|0},sa:function(){return-52},ta:function(){},Ga:function t(e,n,r){t.Vb||(t.Vb=!0,function(t,e,n){function r(t){return(t=t.toTimeString().match(/\\(([A-Za-z ]+)\\)$/))?t[1]:"GMT"}var a=(new Date).getFullYear(),i=new Date(a,0,1),o=new Date(a,6,1);a=i.getTimezoneOffset();var u=o.getTimezoneOffset();E[t>>2>>>0]=60*Math.max(a,u),E[e>>2>>>0]=Number(a!=u),t=r(i),e=r(o),t=it(t),e=it(e),u>2>>>0]=t,M[n+4>>2>>>0]=e):(M[n>>2>>>0]=e,M[n+4>>2>>>0]=t)}(e,n,r))},B:function(){V("")},ma:function(){return 4294901760},I:b?()=>{var t=process.hrtime();return 1e3*t[0]+t[1]/1e6}:()=>performance.now(),xa:function(t,e,n){T.copyWithin(t>>>0,e>>>0,e+n>>>0)},G:function(t){var e=T.length;if(4294901760<(t>>>=0))return!1;for(var n=1;4>=n;n*=2){var r=e*(1+.2/n);r=Math.min(r,t+100663296);var a=Math;r=Math.max(t,r),a=a.min.call(a,4294901760,r+(65536-r%65536)%65536);t:{try{O.grow(a-A.byteLength+65535>>>16),P();var i=1;break t}catch(t){}i=void 0}if(i)return!0}return!1},va:function(t,e){var n=0;return ut().forEach((function(r,a){var i=e+n;for(a=M[t+4*a>>2>>>0]=i,i=0;i>0>>>0]=r.charCodeAt(i);S[a>>0>>>0]=0,n+=r.length+1})),0},wa:function(t,e){var n=ut();M[t>>2>>>0]=n.length;var r=0;return n.forEach((function(t){r+=t.length+1})),M[e>>2>>>0]=r,0},ba:function(t){_||0>2>>>0],u=M[e+4>>2>>>0];e+=8;for(var c=0;c>>0]);a+=u}return M[r>>2>>>0]=a,0},c:function(){return ft},ja:function t(e,r){t.Mb||(t.Mb=function(){if("object"==typeof crypto&&"function"==typeof crypto.getRandomValues){var t=new Uint8Array(1);return()=>(crypto.getRandomValues(t),t[0])}if(b)try{var e=n(Object(function(){var t=new Error("Cannot find module \'crypto\'");throw t.code="MODULE_NOT_FOUND",t}()));return()=>e.randomBytes(1)[0]}catch(t){}return()=>V("randomDevice")}());for(var a=0;a>0>>>0]=t.Mb();return 0},ea:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},fa:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},J:function(t){var e=At();try{return at(t)()}catch(t){if(St(e),t!==t+0)throw t;Ot(1,0)}},e:function(t,e){var n=At();try{return 
at(t)(e)}catch(t){if(St(n),t!==t+0)throw t;Ot(1,0)}},N:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},O:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},j:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},o:function(t,e,n,r){var a=At();try{return at(t)(e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},p:function(t,e,n,r,a){var i=At();try{return at(t)(e,n,r,a)}catch(t){if(St(i),t!==t+0)throw t;Ot(1,0)}},M:function(t,e,n,r,a,i){var o=At();try{return at(t)(e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},r:function(t,e,n,r,a,i){var o=At();try{return at(t)(e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},v:function(t,e,n,r,a,i,o){var u=At();try{return at(t)(e,n,r,a,i,o)}catch(t){if(St(u),t!==t+0)throw t;Ot(1,0)}},K:function(t,e,n,r,a,i,o,u){var c=At();try{return at(t)(e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},D:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=At();try{return at(t)(e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(St(p),t!==t+0)throw t;Ot(1,0)}},X:function(t,e,n,r,a,i,o,u){var c=At();try{return Ft(t,e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},V:function(t,e,n,r,a,i,o){var u=At();try{return xt(t,e,n,r,a,i,o)}catch(t){if(St(u),t!==t+0)throw t;Ot(1,0)}},U:function(t,e,n,r,a){var i=At();try{return It(t,e,n,r,a)}catch(t){if(St(i),t!==t+0)throw t;Ot(1,0)}},Z:function(t,e,n,r){var a=At();try{return Pt(t,e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},W:function(t){var e=At();try{return Ct(t)}catch(t){if(St(e),t!==t+0)throw t;Ot(1,0)}},Y:function(t,e){var n=At();try{return Ut(t,e)}catch(t){if(St(n),t!==t+0)throw t;Ot(1,0)}},T:function(t,e,n){var r=At();try{return Rt(t,e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},f:function(t){var e=At();try{at(t)()}catch(t){if(St(e),t!==t+0)throw t;Ot(1,0)}},q:function(t,e){var n=At();try{at(t)(e)}catch(t){if(St(n),t!==t+0)throw t;Ot(1,0)}},h:function(t,e,n){var r=At();try{at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},da:function(t,e,n,r){var a=At();try{at(t)(e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},l:function(t,e,n,r){var a=At();try{at(t)(e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},t:function(t,e,n,r,a){var i=At();try{at(t)(e,n,r,a)}catch(t){if(St(i),t!==t+0)throw t;Ot(1,0)}},u:function(t,e,n,r,a,i){var o=At();try{at(t)(e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},x:function(t,e,n,r,a,i,o){var u=At();try{at(t)(e,n,r,a,i,o)}catch(t){if(St(u),t!==t+0)throw t;Ot(1,0)}},z:function(t,e,n,r,a,i,o,u){var c=At();try{at(t)(e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},ga:function(t,e,n,r,a,i,o,u,c){var s=At();try{at(t)(e,n,r,a,i,o,u,c)}catch(t){if(St(s),t!==t+0)throw t;Ot(1,0)}},A:function(t,e,n,r,a,i,o,u,c,s,l){var f=At();try{at(t)(e,n,r,a,i,o,u,c,s,l)}catch(t){if(St(f),t!==t+0)throw t;Ot(1,0)}},C:function(t,e,n,r,a,i,o,u,c,s,l,f,p,h,d,y){var b=At();try{at(t)(e,n,r,a,i,o,u,c,s,l,f,p,h,d,y)}catch(t){if(St(b),t!==t+0)throw t;Ot(1,0)}},aa:function(t,e,n,r,a,i,o,u){var c=At();try{jt(t,e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},_:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=At();try{Dt(t,e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(St(p),t!==t+0)throw t;Ot(1,0)}},$:function(t,e,n,r,a,i){var o=At();try{kt(t,e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},n:function(t){return t},F:function(t){ft=t},ha:yt,y:function(t,e,n,r){return yt(t,e,n,r)}};!function(){function 
t(t){e.asm=t.exports,O=e.asm.Ka,P(),U=e.asm.ib,I.unshift(e.asm.La),B--,e.monitorRunDependencies&&e.monitorRunDependencies(B),0==B&&(null!==G&&(clearInterval(G),G=null),N&&(t=N,N=null,t()))}function n(e){t(e.instance)}function r(t){return function(){if(!g&&(d||y)){if("function"==typeof fetch&&!Y.startsWith("file://"))return fetch(Y,{credentials:"same-origin"}).then((function(t){if(!t.ok)throw"failed to load wasm binary file at \'"+Y+"\'";return t.arrayBuffer()})).catch((function(){return X()}));if(o)return new Promise((function(t,e){o(Y,(function(e){t(new Uint8Array(e))}),e)}))}return Promise.resolve().then((function(){return X()}))}().then((function(t){return WebAssembly.instantiate(t,i)})).then((function(t){return t})).then(t,(function(t){w("failed to asynchronously prepare wasm: "+t),V(t)}))}var i={a:bt};if(B++,e.monitorRunDependencies&&e.monitorRunDependencies(B),e.instantiateWasm)try{return e.instantiateWasm(i,t)}catch(t){return w("Module.instantiateWasm callback failed with error: "+t),!1}(g||"function"!=typeof WebAssembly.instantiateStreaming||$()||Y.startsWith("file://")||b||"function"!=typeof fetch?r(n):fetch(Y,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,i).then(n,(function(t){return w("wasm streaming compile failed: "+t),w("falling back to ArrayBuffer instantiation"),r(n)}))}))).catch(a)}(),e.___wasm_call_ctors=function(){return(e.___wasm_call_ctors=e.asm.La).apply(null,arguments)},e._OrtInit=function(){return(e._OrtInit=e.asm.Ma).apply(null,arguments)},e._OrtCreateSessionOptions=function(){return(e._OrtCreateSessionOptions=e.asm.Na).apply(null,arguments)},e._OrtAppendExecutionProvider=function(){return(e._OrtAppendExecutionProvider=e.asm.Oa).apply(null,arguments)},e._OrtAddSessionConfigEntry=function(){return(e._OrtAddSessionConfigEntry=e.asm.Pa).apply(null,arguments)},e._OrtReleaseSessionOptions=function(){return(e._OrtReleaseSessionOptions=e.asm.Qa).apply(null,arguments)},e._OrtCreateSession=function(){return(e._OrtCreateSession=e.asm.Ra).apply(null,arguments)},e._OrtReleaseSession=function(){return(e._OrtReleaseSession=e.asm.Sa).apply(null,arguments)},e._OrtGetInputCount=function(){return(e._OrtGetInputCount=e.asm.Ta).apply(null,arguments)},e._OrtGetOutputCount=function(){return(e._OrtGetOutputCount=e.asm.Ua).apply(null,arguments)},e._OrtGetInputName=function(){return(e._OrtGetInputName=e.asm.Va).apply(null,arguments)},e._OrtGetOutputName=function(){return(e._OrtGetOutputName=e.asm.Wa).apply(null,arguments)},e._OrtFree=function(){return(e._OrtFree=e.asm.Xa).apply(null,arguments)},e._OrtCreateTensor=function(){return(e._OrtCreateTensor=e.asm.Ya).apply(null,arguments)},e._OrtGetTensorData=function(){return(e._OrtGetTensorData=e.asm.Za).apply(null,arguments)},e._OrtReleaseTensor=function(){return(e._OrtReleaseTensor=e.asm._a).apply(null,arguments)},e._OrtCreateRunOptions=function(){return(e._OrtCreateRunOptions=e.asm.$a).apply(null,arguments)},e._OrtAddRunConfigEntry=function(){return(e._OrtAddRunConfigEntry=e.asm.ab).apply(null,arguments)},e._OrtReleaseRunOptions=function(){return(e._OrtReleaseRunOptions=e.asm.bb).apply(null,arguments)},e._OrtRun=function(){return(e._OrtRun=e.asm.cb).apply(null,arguments)},e._OrtEndProfiling=function(){return(e._OrtEndProfiling=e.asm.db).apply(null,arguments)};var 
mt,gt=e._malloc=function(){return(gt=e._malloc=e.asm.eb).apply(null,arguments)},vt=e._free=function(){return(vt=e._free=e.asm.fb).apply(null,arguments)},wt=e._fflush=function(){return(wt=e._fflush=e.asm.gb).apply(null,arguments)},_t=e.___funcs_on_exit=function(){return(_t=e.___funcs_on_exit=e.asm.hb).apply(null,arguments)},Ot=e._setThrew=function(){return(Ot=e._setThrew=e.asm.jb).apply(null,arguments)},At=e.stackSave=function(){return(At=e.stackSave=e.asm.kb).apply(null,arguments)},St=e.stackRestore=function(){return(St=e.stackRestore=e.asm.lb).apply(null,arguments)},Tt=e.stackAlloc=function(){return(Tt=e.stackAlloc=e.asm.mb).apply(null,arguments)},Et=e.___cxa_can_catch=function(){return(Et=e.___cxa_can_catch=e.asm.nb).apply(null,arguments)},Mt=e.___cxa_is_pointer_type=function(){return(Mt=e.___cxa_is_pointer_type=e.asm.ob).apply(null,arguments)},Ct=e.dynCall_j=function(){return(Ct=e.dynCall_j=e.asm.pb).apply(null,arguments)},xt=e.dynCall_iiiiij=function(){return(xt=e.dynCall_iiiiij=e.asm.qb).apply(null,arguments)},Rt=e.dynCall_jii=function(){return(Rt=e.dynCall_jii=e.asm.rb).apply(null,arguments)},jt=e.dynCall_viiiiij=function(){return(jt=e.dynCall_viiiiij=e.asm.sb).apply(null,arguments)},kt=e.dynCall_vjji=function(){return(kt=e.dynCall_vjji=e.asm.tb).apply(null,arguments)},Dt=e.dynCall_viiijjjii=function(){return(Dt=e.dynCall_viiijjjii=e.asm.ub).apply(null,arguments)},Pt=e.dynCall_iij=function(){return(Pt=e.dynCall_iij=e.asm.vb).apply(null,arguments)},Ut=e.dynCall_ji=function(){return(Ut=e.dynCall_ji=e.asm.wb).apply(null,arguments)},Ft=e.dynCall_iiiiiij=function(){return(Ft=e.dynCall_iiiiiij=e.asm.xb).apply(null,arguments)},It=e.dynCall_iiij=function(){return(It=e.dynCall_iiij=e.asm.yb).apply(null,arguments)};function Wt(){function t(){if(!mt&&(mt=!0,e.calledRun=!0,!C)){if(Z(I),r(e),e.onRuntimeInitialized&&e.onRuntimeInitialized(),e.postRun)for("function"==typeof e.postRun&&(e.postRun=[e.postRun]);e.postRun.length;){var t=e.postRun.shift();H.unshift(t)}Z(H)}}if(!(0{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.iterateExtraOptions=void 0,e.iterateExtraOptions=(t,n,r,a)=>{if("object"==typeof t&&null!==t){if(r.has(t))throw new Error("Circular reference in options");r.add(t)}Object.entries(t).forEach((([t,i])=>{const o=n?n+t:t;if("object"==typeof i)(0,e.iterateExtraOptions)(i,o+".",r,a);else if("string"==typeof i||"number"==typeof i)a(o,i.toString());else{if("boolean"!=typeof i)throw new Error("Can\'t handle extra config type: "+typeof i);a(o,i?"1":"0")}}))}},586:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.setRunOptions=void 0;const r=n(967),a=n(983),i=n(361);e.setRunOptions=t=>{const e=(0,i.getInstance)();let n=0;const o=[],u=t||{};try{if(void 0===(null==t?void 0:t.logSeverityLevel))u.logSeverityLevel=2;else if("number"!=typeof t.logSeverityLevel||!Number.isInteger(t.logSeverityLevel)||t.logSeverityLevel<0||t.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${t.logSeverityLevel}`);if(void 0===(null==t?void 0:t.logVerbosityLevel))u.logVerbosityLevel=0;else if("number"!=typeof t.logVerbosityLevel||!Number.isInteger(t.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${t.logVerbosityLevel}`);void 0===(null==t?void 0:t.terminate)&&(u.terminate=!1);let i=0;if(void 0!==(null==t?void 0:t.tag)&&(i=(0,a.allocWasmString)(t.tag,o)),n=e._OrtCreateRunOptions(u.logSeverityLevel,u.logVerbosityLevel,!!u.terminate,i),0===n)throw new Error("Can\'t create run options");return void 0!==(null==t?void 
0:t.extra)&&(0,r.iterateExtraOptions)(t.extra,"",new WeakSet,((t,r)=>{const i=(0,a.allocWasmString)(t,o),u=(0,a.allocWasmString)(r,o);if(0!==e._OrtAddRunConfigEntry(n,i,u))throw new Error(`Can\'t set a run config entry: ${t} - ${r}`)})),[n,o]}catch(t){throw 0!==n&&e._OrtReleaseRunOptions(n),o.forEach(e._free),t}}},919:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.setSessionOptions=void 0;const r=n(967),a=n(983),i=n(361);e.setSessionOptions=t=>{const e=(0,i.getInstance)();let n=0;const o=[],u=t||{};(t=>{t.extra||(t.extra={}),t.extra.session||(t.extra.session={});const e=t.extra.session;e.use_ort_model_bytes_directly||(e.use_ort_model_bytes_directly="1")})(u);try{void 0===(null==t?void 0:t.graphOptimizationLevel)&&(u.graphOptimizationLevel="all");const c=(t=>{switch(t){case"disabled":return 0;case"basic":return 1;case"extended":return 2;case"all":return 99;default:throw new Error(`unsupported graph optimization level: ${t}`)}})(u.graphOptimizationLevel);void 0===(null==t?void 0:t.enableCpuMemArena)&&(u.enableCpuMemArena=!0),void 0===(null==t?void 0:t.enableMemPattern)&&(u.enableMemPattern=!0),void 0===(null==t?void 0:t.executionMode)&&(u.executionMode="sequential");const s=(t=>{switch(t){case"sequential":return 0;case"parallel":return 1;default:throw new Error(`unsupported execution mode: ${t}`)}})(u.executionMode);let l=0;if(void 0!==(null==t?void 0:t.logId)&&(l=(0,a.allocWasmString)(t.logId,o)),void 0===(null==t?void 0:t.logSeverityLevel))u.logSeverityLevel=2;else if("number"!=typeof t.logSeverityLevel||!Number.isInteger(t.logSeverityLevel)||t.logSeverityLevel<0||t.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${t.logSeverityLevel}`);if(void 0===(null==t?void 0:t.logVerbosityLevel))u.logVerbosityLevel=0;else if("number"!=typeof t.logVerbosityLevel||!Number.isInteger(t.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${t.logVerbosityLevel}`);if(void 0===(null==t?void 0:t.enableProfiling)&&(u.enableProfiling=!1),n=e._OrtCreateSessionOptions(c,!!u.enableCpuMemArena,!!u.enableMemPattern,s,!!u.enableProfiling,0,l,u.logSeverityLevel,u.logVerbosityLevel),0===n)throw new Error("Can\'t create session options");return(null==t?void 0:t.executionProviders)&&((t,e,n)=>{for(const r of e){let e="string"==typeof r?r:r.name;switch(e){case"xnnpack":e="XNNPACK";break;case"wasm":case"cpu":continue;default:throw new Error(`not supported EP: ${e}`)}const o=(0,a.allocWasmString)(e,n);if(0!==(0,i.getInstance)()._OrtAppendExecutionProvider(t,o))throw new Error(`Can\'t append execution provider: ${e}`)}})(n,t.executionProviders,o),void 0!==(null==t?void 0:t.extra)&&(0,r.iterateExtraOptions)(t.extra,"",new WeakSet,((t,r)=>{const i=(0,a.allocWasmString)(t,o),u=(0,a.allocWasmString)(r,o);if(0!==e._OrtAddSessionConfigEntry(n,i,u))throw new Error(`Can\'t set a session config entry: ${t} - ${r}`)})),[n,o]}catch(t){throw 0!==n&&e._OrtReleaseSessionOptions(n),o.forEach(e._free),t}}},983:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.allocWasmString=void 0;const r=n(361);e.allocWasmString=(t,e)=>{const n=(0,r.getInstance)(),a=n.lengthBytesUTF8(t)+1,i=n._malloc(a);return n.stringToUTF8(t,i,a),e.push(i),i}},349:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.extractTransferableBuffers=e.endProfiling=e.run=e.releaseSession=e.createSession=e.createSessionFinalize=e.createSessionAllocate=e.initOrt=void 0;const r=n(586),a=n(919),i=n(983),o=n(361);e.initOrt=(t,e)=>{const 
n=(0,o.getInstance)()._OrtInit(t,e);if(0!==n)throw new Error(`Can\'t initialize onnxruntime. error code = ${n}`)};const u=new Map;e.createSessionAllocate=t=>{const e=(0,o.getInstance)(),n=e._malloc(t.byteLength);return e.HEAPU8.set(t,n),[n,t.byteLength]},e.createSessionFinalize=(t,e)=>{const n=(0,o.getInstance)();let r=0,i=0,c=[];try{if([i,c]=(0,a.setSessionOptions)(e),r=n._OrtCreateSession(t[0],t[1],i),0===r)throw new Error("Can\'t create a session")}finally{n._free(t[0]),n._OrtReleaseSessionOptions(i),c.forEach(n._free)}const s=n._OrtGetInputCount(r),l=n._OrtGetOutputCount(r),f=[],p=[],h=[],d=[];for(let t=0;t{const r=(0,e.createSessionAllocate)(t);return(0,e.createSessionFinalize)(r,n)},e.releaseSession=t=>{const e=(0,o.getInstance)(),n=u.get(t);if(!n)throw new Error("invalid session id");const r=n[0],a=n[1],i=n[2];a.forEach(e._OrtFree),i.forEach(e._OrtFree),e._OrtReleaseSession(r),u.delete(t)};const c=t=>{switch(t){case"int8":return 3;case"uint8":return 2;case"bool":return 9;case"int16":return 5;case"uint16":return 4;case"int32":return 6;case"uint32":return 12;case"float32":return 1;case"float64":return 11;case"string":return 8;case"int64":return 7;case"uint64":return 13;default:throw new Error(`unsupported data type: ${t}`)}},s=t=>{switch(t){case 3:return"int8";case 2:return"uint8";case 9:return"bool";case 5:return"int16";case 4:return"uint16";case 6:return"int32";case 12:return"uint32";case 1:return"float32";case 11:return"float64";case 8:return"string";case 7:return"int64";case 13:return"uint64";default:throw new Error(`unsupported data type: ${t}`)}},l=t=>{switch(t){case"float32":return Float32Array;case"uint8":case"bool":return Uint8Array;case"int8":return Int8Array;case"uint16":return Uint16Array;case"int16":return Int16Array;case"int32":return Int32Array;case"float64":return Float64Array;case"uint32":return Uint32Array;case"int64":return BigInt64Array;case"uint64":return BigUint64Array;default:throw new Error(`unsupported type: ${t}`)}};e.run=(t,e,n,a,f)=>{const p=(0,o.getInstance)(),h=u.get(t);if(!h)throw new Error("invalid session id");const d=h[0],y=h[1],b=h[2],m=e.length,g=a.length;let v=0,w=[];const _=[],O=[];try{[v,w]=(0,r.setRunOptions)(f);for(let t=0;tp.HEAP32[t++]=e));const n=p._OrtCreateTensor(c(e),o,u,l,r.length);if(0===n)throw new Error("Can\'t create a tensor");_.push(n)}finally{p.stackRestore(s)}}const t=p.stackSave(),o=p.stackAlloc(4*m),u=p.stackAlloc(4*m),h=p.stackAlloc(4*g),A=p.stackAlloc(4*g);try{let n=o/4,r=u/4,i=h/4,c=A/4;for(let t=0;tt*e));if(a=s(o),"string"===a){const t=[];let e=i/4;for(let n=0;n{const e=(0,o.getInstance)(),n=u.get(t);if(!n)throw new Error("invalid session id");const r=n[0],a=e._OrtEndProfiling(r);if(0===a)throw new Error("Can\'t get an profile file name");e._OrtFree(a)},e.extractTransferableBuffers=t=>{const e=[];for(const n of t){const t=n[2];!Array.isArray(t)&&t.buffer&&e.push(t.buffer)}return e}},361:function(t,e,n){"use strict";var r=this&&this.__createBinding||(Object.create?function(t,e,n,r){void 0===r&&(r=n);var a=Object.getOwnPropertyDescriptor(e,n);a&&!("get"in a?!e.__esModule:a.writable||a.configurable)||(a={enumerable:!0,get:function(){return e[n]}}),Object.defineProperty(t,r,a)}:function(t,e,n,r){void 0===r&&(r=n),t[r]=e[n]}),a=this&&this.__setModuleDefault||(Object.create?function(t,e){Object.defineProperty(t,"default",{enumerable:!0,value:e})}:function(t,e){t.default=e}),i=this&&this.__importStar||function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var n in 
t)"default"!==n&&Object.prototype.hasOwnProperty.call(t,n)&&r(e,t,n);return a(e,t),e},o=this&&this.__importDefault||function(t){return t&&t.__esModule?t:{default:t}};Object.defineProperty(e,"__esModule",{value:!0}),e.dispose=e.getInstance=e.initializeWebAssembly=void 0;const u=i(n(449)),c=o(n(932)),s=n(474);let l,f=!1,p=!1,h=!1;const d=(t,e)=>e?t?"ort-wasm-simd-threaded.wasm":"ort-wasm-threaded.wasm":t?"ort-wasm-simd.wasm":"ort-wasm.wasm";e.initializeWebAssembly=async t=>{if(f)return Promise.resolve();if(p)throw new Error("multiple calls to \'initializeWebAssembly()\' detected.");if(h)throw new Error("previous call to \'initializeWebAssembly()\' failed.");p=!0;const e=t.initTimeout,r=t.numThreads,a=t.simd,i=r>1&&(()=>{try{return"undefined"!=typeof SharedArrayBuffer&&("undefined"!=typeof MessageChannel&&(new MessageChannel).port1.postMessage(new SharedArrayBuffer(1)),WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,5,4,1,3,1,1,10,11,1,9,0,65,0,254,16,2,0,26,11])))}catch(t){return!1}})(),o=a&&(()=>{try{return WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,10,30,1,28,0,65,0,253,15,253,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,253,186,1,26,11]))}catch(t){return!1}})(),y="string"==typeof t.wasmPaths?t.wasmPaths:void 0,b=d(!1,i),m=d(o,i),g="object"==typeof t.wasmPaths?t.wasmPaths[m]:void 0;let v=!1;const w=[];if(e>0&&w.push(new Promise((t=>{setTimeout((()=>{v=!0,t()}),e)}))),w.push(new Promise(((t,e)=>{const r=i?s:c.default,a={locateFile:(t,e)=>i&&t.endsWith(".worker.js")&&"undefined"!=typeof Blob?URL.createObjectURL(new Blob([n(154)],{type:"text/javascript"})):t===b?null!=g?g:(null!=y?y:e)+m:e+t};if(i)if("undefined"==typeof Blob)a.mainScriptUrlOrBlob=u.join("/","ort-wasm-threaded.js");else{const t=`var ortWasmThreaded=(function(){var _scriptDir;return ${r.toString()}})();`;a.mainScriptUrlOrBlob=new Blob([t],{type:"text/javascript"})}r(a).then((e=>{p=!1,f=!0,l=e,t()}),(t=>{p=!1,h=!0,e(t)}))}))),await Promise.race(w),v)throw new Error(`WebAssembly backend initializing failed due to timeout: ${e}ms`)},e.getInstance=()=>{if(f&&l)return l;throw new Error("WebAssembly is not initialized yet.")},e.dispose=()=>{var t;!f||p||h||(p=!0,null===(t=l.PThread)||void 0===t||t.terminateAllThreads(),l=void 0,p=!1,f=!1,h=!0)}},154:t=>{"use strict";t.exports=\'"use strict";var e={},t="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node;if(t){var r=require("worker_threads"),a=r.parentPort;a.on("message",(e=>onmessage({data:e})));var o=require("fs");Object.assign(global,{self:global,require:require,Module:e,location:{href:__filename},Worker:r.Worker,importScripts:function(e){(0,eval)(o.readFileSync(e,"utf8"))},postMessage:function(e){a.postMessage(e)},performance:global.performance||{now:function(){return Date.now()}}})}var s=!1,n=[],i=function(){var e=Array.prototype.slice.call(arguments).join(" ");t?o.writeSync(2,e+"\\\\n"):console.error(e)};self.alert=function(){var t=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:t,threadId:e._pthread_self()})},e.instantiateWasm=(t,r)=>{var a=new WebAssembly.Instance(e.wasmModule,t);return r(a),e.wasmModule=null,a.exports},self.onunhandledrejection=e=>{throw e.reason??e},self.onmessage=t=>{try{if("load"===t.data.cmd){if(e.wasmModule=t.data.wasmModule,e.wasmMemory=t.data.wasmMemory,e.buffer=e.wasmMemory.buffer,e.ENVIRONMENT_IS_PTHREAD=!0,"string"==typeof t.data.urlOrBlob)importScripts(t.data.urlOrBlob);else{var 
r=URL.createObjectURL(t.data.urlOrBlob);importScripts(r),URL.revokeObjectURL(r)}ortWasmThreaded(e).then((function(t){e=t}))}else if("run"===t.data.cmd){e.__performance_now_clock_drift=performance.now()-t.data.time,e.__emscripten_thread_init(t.data.pthread_ptr,0,0,1),e.establishStackSpace(),e.PThread.receiveObjectTransfer(t.data),e.PThread.threadInitTLS(),s||(n.forEach((t=>{e.executeNotifiedProxyingQueue(t)})),n=[],s=!0);try{e.invokeEntryPoint(t.data.start_routine,t.data.arg)}catch(t){if("unwind"!=t){if(!(t instanceof e.ExitStatus))throw t;e.keepRuntimeAlive()||e.__emscripten_thread_exit(t.status)}}}else"cancel"===t.data.cmd?e._pthread_self()&&e.__emscripten_thread_exit(-1):"setimmediate"===t.data.target||("processProxyingQueue"===t.data.cmd?s?e.executeNotifiedProxyingQueue(t.data.queue):n.push(t.data.queue):(i("worker.js received unknown command "+t.data.cmd),i(t.data)))}catch(t){throw i("worker.js onmessage() captured an uncaught exception: "+t),t&&t.stack&&i(t.stack),e.__emscripten_thread_crashed&&e.__emscripten_thread_crashed(),t}};\\n\'},384:()=>{},993:()=>{},908:()=>{},953:()=>{},925:()=>{},449:()=>{}},e={};function n(r){var a=e[r];if(void 0!==a)return a.exports;var i=e[r]={exports:{}};return t[r].call(i.exports,i,i.exports,n),i.exports}n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),(()=>{"use strict";const t=n(349),e=n(361);self.onmessage=n=>{switch(n.data.type){case"init-wasm":(0,e.initializeWebAssembly)(n.data.in).then((()=>postMessage({type:"init-wasm"})),(t=>postMessage({type:"init-wasm",err:t})));break;case"init-ort":try{const{numThreads:e,loggingLevel:r}=n.data.in;(0,t.initOrt)(e,r),postMessage({type:"init-ort"})}catch(t){postMessage({type:"init-ort",err:t})}break;case"create_allocate":try{const{model:e}=n.data.in,r=(0,t.createSessionAllocate)(e);postMessage({type:"create_allocate",out:r})}catch(t){postMessage({type:"create_allocate",err:t})}break;case"create_finalize":try{const{modeldata:e,options:r}=n.data.in,a=(0,t.createSessionFinalize)(e,r);postMessage({type:"create_finalize",out:a})}catch(t){postMessage({type:"create_finalize",err:t})}break;case"create":try{const{model:e,options:r}=n.data.in,a=(0,t.createSession)(e,r);postMessage({type:"create",out:a})}catch(t){postMessage({type:"create",err:t})}break;case"release":try{const e=n.data.in;(0,t.releaseSession)(e),postMessage({type:"release"})}catch(t){postMessage({type:"release",err:t})}break;case"run":try{const{sessionId:e,inputIndices:r,inputs:a,outputIndices:i,options:o}=n.data.in,u=(0,t.run)(e,r,a,i,o);postMessage({type:"run",out:u},(0,t.extractTransferableBuffers)(u))}catch(t){postMessage({type:"run",err:t})}break;case"end-profiling":try{const e=n.data.in;(0,t.endProfiling)(e),postMessage({type:"end-profiling"})}catch(t){postMessage({type:"end-profiling",err:t})}}}})()})();\n',"Worker",void 0,void 0)}},477:y=>{y.exports=function(n,a,u,c){var p=self||window;try{try{var s;try{s=new p.Blob([n])}catch{(s=new(p.BlobBuilder||p.WebKitBlobBuilder||p.MozBlobBuilder||p.MSBlobBuilder)).append(n),s=s.getBlob()}var h=p.URL||p.webkitURL,f=h.createObjectURL(s),l=new p[a](f,u);return h.revokeObjectURL(f),l}catch{return new p[a]("data:application/javascript,".concat(encodeURIComponent(n)),u)}}catch{if(!c)throw Error("Inline worker is not supported");return new p[a](c,u)}}},4154:y=>{y.exports=`"use strict";var e={},t="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof 
process.versions.node;if(t){var r=require("worker_threads"),a=r.parentPort;a.on("message",(e=>onmessage({data:e})));var o=require("fs");Object.assign(global,{self:global,require:require,Module:e,location:{href:__filename},Worker:r.Worker,importScripts:function(e){(0,eval)(o.readFileSync(e,"utf8"))},postMessage:function(e){a.postMessage(e)},performance:global.performance||{now:function(){return Date.now()}}})}var s=!1,n=[],i=function(){var e=Array.prototype.slice.call(arguments).join(" ");t?o.writeSync(2,e+"\\n"):console.error(e)};self.alert=function(){var t=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:t,threadId:e._pthread_self()})},e.instantiateWasm=(t,r)=>{var a=new WebAssembly.Instance(e.wasmModule,t);return r(a),e.wasmModule=null,a.exports},self.onunhandledrejection=e=>{throw e.reason??e},self.onmessage=t=>{try{if("load"===t.data.cmd){if(e.wasmModule=t.data.wasmModule,e.wasmMemory=t.data.wasmMemory,e.buffer=e.wasmMemory.buffer,e.ENVIRONMENT_IS_PTHREAD=!0,"string"==typeof t.data.urlOrBlob)importScripts(t.data.urlOrBlob);else{var r=URL.createObjectURL(t.data.urlOrBlob);importScripts(r),URL.revokeObjectURL(r)}ortWasmThreaded(e).then((function(t){e=t}))}else if("run"===t.data.cmd){e.__performance_now_clock_drift=performance.now()-t.data.time,e.__emscripten_thread_init(t.data.pthread_ptr,0,0,1),e.establishStackSpace(),e.PThread.receiveObjectTransfer(t.data),e.PThread.threadInitTLS(),s||(n.forEach((t=>{e.executeNotifiedProxyingQueue(t)})),n=[],s=!0);try{e.invokeEntryPoint(t.data.start_routine,t.data.arg)}catch(t){if("unwind"!=t){if(!(t instanceof e.ExitStatus))throw t;e.keepRuntimeAlive()||e.__emscripten_thread_exit(t.status)}}}else"cancel"===t.data.cmd?e._pthread_self()&&e.__emscripten_thread_exit(-1):"setimmediate"===t.data.target||("processProxyingQueue"===t.data.cmd?s?e.executeNotifiedProxyingQueue(t.data.queue):n.push(t.data.queue):(i("worker.js received unknown command "+t.data.cmd),i(t.data)))}catch(t){throw i("worker.js onmessage() captured an uncaught exception: "+t),t&&t.stack&&i(t.stack),e.__emscripten_thread_crashed&&e.__emscripten_thread_crashed(),t}}; -`},1670:y=>{y.exports=__WEBPACK_EXTERNAL_MODULE__1670__},7067:()=>{},1296:()=>{},1384:()=>{},3993:()=>{},908:()=>{},6953:()=>{},9925:()=>{},2806:()=>{},6449:()=>{},2850:()=>{},5381:()=>{},5686:(y,n,a)=>{a.r(n),a.d(n,{flatbuffers:()=>u});var u={};u.Offset,u.Table,u.SIZEOF_SHORT=2,u.SIZEOF_INT=4,u.FILE_IDENTIFIER_LENGTH=4,u.SIZE_PREFIX_LENGTH=4,u.Encoding={UTF8_BYTES:1,UTF16_STRING:2},u.int32=new Int32Array(2),u.float32=new Float32Array(u.int32.buffer),u.float64=new Float64Array(u.int32.buffer),u.isLittleEndian=new Uint16Array(new Uint8Array([1,0]).buffer)[0]===1,u.Long=function(c,p){this.low=0|c,this.high=0|p},u.Long.create=function(c,p){return c==0&&p==0?u.Long.ZERO:new u.Long(c,p)},u.Long.prototype.toFloat64=function(){return(this.low>>>0)+4294967296*this.high},u.Long.prototype.equals=function(c){return this.low==c.low&&this.high==c.high},u.Long.ZERO=new u.Long(0,0),u.Builder=function(c){if(c)p=c;else var 
p=1024;this.bb=u.ByteBuffer.allocate(p),this.space=p,this.minalign=1,this.vtable=null,this.vtable_in_use=0,this.isNested=!1,this.object_start=0,this.vtables=[],this.vector_num_elems=0,this.force_defaults=!1},u.Builder.prototype.clear=function(){this.bb.clear(),this.space=this.bb.capacity(),this.minalign=1,this.vtable=null,this.vtable_in_use=0,this.isNested=!1,this.object_start=0,this.vtables=[],this.vector_num_elems=0,this.force_defaults=!1},u.Builder.prototype.forceDefaults=function(c){this.force_defaults=c},u.Builder.prototype.dataBuffer=function(){return this.bb},u.Builder.prototype.asUint8Array=function(){return this.bb.bytes().subarray(this.bb.position(),this.bb.position()+this.offset())},u.Builder.prototype.prep=function(c,p){c>this.minalign&&(this.minalign=c);for(var s=1+~(this.bb.capacity()-this.space+p)&c-1;this.space=0&&this.vtable[p]==0;p--);for(var s=p+1;p>=0;p--)this.addInt16(this.vtable[p]!=0?c-this.vtable[p]:0);this.addInt16(c-this.object_start);var h=(s+2)*u.SIZEOF_SHORT;this.addInt16(h);var f=0,l=this.space;e:for(p=0;p=0;l--)this.writeInt8(f.charCodeAt(l))}this.prep(this.minalign,u.SIZEOF_INT+h),this.addOffset(c),h&&this.addInt32(this.bb.capacity()-this.space),this.bb.setPosition(this.space)},u.Builder.prototype.finishSizePrefixed=function(c,p){this.finish(c,p,!0)},u.Builder.prototype.requiredField=function(c,p){var s=this.bb.capacity()-c,h=s-this.bb.readInt32(s);if(this.bb.readInt16(h+p)==0)throw new Error("FlatBuffers: field "+p+" must be set")},u.Builder.prototype.startVector=function(c,p,s){this.notNested(),this.vector_num_elems=p,this.prep(u.SIZEOF_INT,c*p),this.prep(s,c*p)},u.Builder.prototype.endVector=function(){return this.writeInt32(this.vector_num_elems),this.offset()},u.Builder.prototype.createString=function(c){if(c instanceof Uint8Array)var p=c;else{p=[];for(var s=0;s=56320?f:(f<<10)+c.charCodeAt(s++)+-56613888)<128?p.push(h):(h<2048?p.push(h>>6&31|192):(h<65536?p.push(h>>12&15|224):p.push(h>>18&7|240,h>>12&63|128),p.push(h>>6&63|128)),p.push(63&h|128))}}this.addInt8(0),this.startVector(1,p.length,1),this.bb.setPosition(this.space-=p.length),s=0;for(var l=this.space,o=this.bb.bytes();s>24},u.ByteBuffer.prototype.readUint8=function(c){return this.bytes_[c]},u.ByteBuffer.prototype.readInt16=function(c){return this.readUint16(c)<<16>>16},u.ByteBuffer.prototype.readUint16=function(c){return this.bytes_[c]|this.bytes_[c+1]<<8},u.ByteBuffer.prototype.readInt32=function(c){return this.bytes_[c]|this.bytes_[c+1]<<8|this.bytes_[c+2]<<16|this.bytes_[c+3]<<24},u.ByteBuffer.prototype.readUint32=function(c){return this.readInt32(c)>>>0},u.ByteBuffer.prototype.readInt64=function(c){return new u.Long(this.readInt32(c),this.readInt32(c+4))},u.ByteBuffer.prototype.readUint64=function(c){return new u.Long(this.readUint32(c),this.readUint32(c+4))},u.ByteBuffer.prototype.readFloat32=function(c){return u.int32[0]=this.readInt32(c),u.float32[0]},u.ByteBuffer.prototype.readFloat64=function(c){return 
u.int32[u.isLittleEndian?0:1]=this.readInt32(c),u.int32[u.isLittleEndian?1:0]=this.readInt32(c+4),u.float64[0]},u.ByteBuffer.prototype.writeInt8=function(c,p){this.bytes_[c]=p},u.ByteBuffer.prototype.writeUint8=function(c,p){this.bytes_[c]=p},u.ByteBuffer.prototype.writeInt16=function(c,p){this.bytes_[c]=p,this.bytes_[c+1]=p>>8},u.ByteBuffer.prototype.writeUint16=function(c,p){this.bytes_[c]=p,this.bytes_[c+1]=p>>8},u.ByteBuffer.prototype.writeInt32=function(c,p){this.bytes_[c]=p,this.bytes_[c+1]=p>>8,this.bytes_[c+2]=p>>16,this.bytes_[c+3]=p>>24},u.ByteBuffer.prototype.writeUint32=function(c,p){this.bytes_[c]=p,this.bytes_[c+1]=p>>8,this.bytes_[c+2]=p>>16,this.bytes_[c+3]=p>>24},u.ByteBuffer.prototype.writeInt64=function(c,p){this.writeInt32(c,p.low),this.writeInt32(c+4,p.high)},u.ByteBuffer.prototype.writeUint64=function(c,p){this.writeUint32(c,p.low),this.writeUint32(c+4,p.high)},u.ByteBuffer.prototype.writeFloat32=function(c,p){u.float32[0]=p,this.writeInt32(c,u.int32[0])},u.ByteBuffer.prototype.writeFloat64=function(c,p){u.float64[0]=p,this.writeInt32(c,u.int32[u.isLittleEndian?0:1]),this.writeInt32(c+4,u.int32[u.isLittleEndian?1:0])},u.ByteBuffer.prototype.getBufferIdentifier=function(){if(this.bytes_.length>10),56320+(1023&l)))}return h},u.ByteBuffer.prototype.__indirect=function(c){return c+this.readInt32(c)},u.ByteBuffer.prototype.__vector=function(c){return c+this.readInt32(c)+u.SIZEOF_INT},u.ByteBuffer.prototype.__vector_len=function(c){return this.readInt32(c+this.readInt32(c))},u.ByteBuffer.prototype.__has_identifier=function(c){if(c.length!=u.FILE_IDENTIFIER_LENGTH)throw new Error("FlatBuffers: file identifier must be length "+u.FILE_IDENTIFIER_LENGTH);for(var p=0;p{var n=y&&y.__esModule?()=>y.default:()=>y;return __webpack_require__.d(n,{a:n}),n},__webpack_require__.d=(y,n)=>{for(var a in n)__webpack_require__.o(n,a)&&!__webpack_require__.o(y,a)&&Object.defineProperty(y,a,{enumerable:!0,get:n[a]})},__webpack_require__.g=function(){if(typeof globalThis=="object")return globalThis;try{return this||new Function("return this")()}catch{if(typeof window=="object")return window}}(),__webpack_require__.o=(y,n)=>Object.prototype.hasOwnProperty.call(y,n),__webpack_require__.r=y=>{typeof Symbol<"u"&&Symbol.toStringTag&&Object.defineProperty(y,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(y,"__esModule",{value:!0})};var __webpack_exports__=__webpack_require__(6018);return __webpack_exports__})())})(ortWeb_min$1);var ortWeb_minExports=ortWeb_min$1.exports,ortWeb_min=getDefaultExportFromCjs(ortWeb_minExports),ONNX_WEB=_mergeNamespaces({__proto__:null,default:ortWeb_min},[ortWeb_minExports]);let ONNX;const executionProviders=["wasm"];typeof process<"u"&&((nt=process==null?void 0:process.release)==null?void 0:nt.name)==="node"?(ONNX=fs??ONNX_NODE,executionProviders.unshift("cpu")):(ONNX=ortWeb_min??ONNX_WEB,typeof navigator<"u"&&/iP(hone|od|ad).+16_4.+AppleWebKit/.test(navigator.userAgent)&&(ONNX.env.wasm.simd=!1));const{env:onnx_env}=ONNX,VERSION="2.3.1",WEB_CACHE_AVAILABLE=typeof self<"u"&&"caches"in 
self,FS_AVAILABLE=!isEmpty(fs),PATH_AVAILABLE=!isEmpty(fs),RUNNING_LOCALLY=FS_AVAILABLE&&PATH_AVAILABLE,__dirname=RUNNING_LOCALLY?fs.dirname(fs.dirname(fs.fileURLToPath(self.location.href))):"./",DEFAULT_CACHE_DIR=RUNNING_LOCALLY?fs.join(__dirname,"/.cache/"):null,DEFAULT_LOCAL_MODEL_PATH="/models/",localModelPath=RUNNING_LOCALLY?fs.join(__dirname,DEFAULT_LOCAL_MODEL_PATH):DEFAULT_LOCAL_MODEL_PATH;onnx_env.wasm.wasmPaths=RUNNING_LOCALLY?fs.join(__dirname,"/dist/"):`https://cdn.jsdelivr.net/npm/@xenova/transformers@${VERSION}/dist/`;const env={backends:{onnx:onnx_env,tfjs:{}},__dirname,version:VERSION,allowRemoteModels:!0,remoteHost:"https://huggingface.co/",remotePathTemplate:"{model}/resolve/{revision}/",allowLocalModels:!0,localModelPath,useFS:FS_AVAILABLE,useBrowserCache:WEB_CACHE_AVAILABLE,useFSCache:FS_AVAILABLE,cacheDir:DEFAULT_CACHE_DIR};function isEmpty(y){return Object.keys(y).length===0}globalThis.ReadableStream||(globalThis.ReadableStream=fs.ReadableStream);class Headers extends Object{constructor(...n){super(),Object.assign(this,n)}get(n){return this[n]}clone(){return new Headers(this)}}class FileResponse{constructor(n){Se(this,"_CONTENT_TYPE_MAP",{txt:"text/plain",html:"text/html",css:"text/css",js:"text/javascript",json:"application/json",png:"image/png",jpg:"image/jpeg",jpeg:"image/jpeg",gif:"image/gif"});if(this.filePath=n,this.headers=new Headers,this.exists=fs.existsSync(n),this.exists){this.status=200,this.statusText="OK";let a=fs.statSync(n);this.headers["content-length"]=a.size,this.updateContentType();let u=this;this.body=new ReadableStream({start(c){u.arrayBuffer().then(p=>{c.enqueue(new Uint8Array(p)),c.close()})}})}else this.status=404,this.statusText="Not Found",this.body=null}updateContentType(){const n=this.filePath.toString().split(".").pop().toLowerCase();this.headers["content-type"]=this._CONTENT_TYPE_MAP[n]??"application/octet-stream"}clone(){let n=new FileResponse(this.filePath);return n.exists=this.exists,n.status=this.status,n.statusText=this.statusText,n.headers=this.headers.clone(),n}async arrayBuffer(){return(await fs.promises.readFile(this.filePath)).buffer}async blob(){const n=await fs.promises.readFile(this.filePath);return new Blob([n],{type:this.headers["content-type"]})}async text(){return await fs.promises.readFile(this.filePath,"utf8")}async json(){return JSON.parse(await this.text())}}function isValidHttpUrl(y){let n;try{n=new URL(y)}catch{return!1}return n.protocol==="http:"||n.protocol==="https:"}async function getFile(y){var n,a;if(env.useFS&&!isValidHttpUrl(y))return new FileResponse(y);if(typeof process<"u"&&((n=process==null?void 0:process.release)==null?void 0:n.name)==="node"){const u=!!((a=process.env)!=null&&a.TESTING_REMOTELY),c=env.version;return fetch(y,{headers:{"User-Agent":`transformers.js/${c}; is_ci/${u};`}})}else return fetch(y)}const ERROR_MAPPING={400:"Bad request error occurred while trying to load file",401:"Unauthorized access to file",403:"Forbidden access to file",404:"Could not locate file",408:"Request timeout error occurred while trying to load file",500:"Internal server error error occurred while trying to load file",502:"Bad gateway error occurred while trying to load file",503:"Service unavailable error occurred while trying to load file",504:"Gateway timeout error occurred while trying to load file"};function handleError(y,n,a){if(!a)return null;const u=ERROR_MAPPING[y]??`Error (${y}) occurred while trying to load file`;throw Error(`${u}: "${n}".`)}class FileCache{constructor(n){this.path=n}async match(n){let 
a=fs.join(this.path,n),u=new FileResponse(a);if(u.exists)return u}async put(n,a){const u=Buffer.from(await a.arrayBuffer());let c=fs.join(this.path,n);try{await fs.promises.mkdir(fs.dirname(c),{recursive:!0}),await fs.promises.writeFile(c,u)}catch(p){console.warn("An error occurred while writing the file to cache:",p)}}}async function tryCache(y,...n){for(let a of n)try{let u=await y.match(a);if(u)return u}catch{continue}}async function getModelFile(y,n,a=!0,u={}){if(!env.allowLocalModels&&u.local_files_only)throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).");dispatchCallback(u.progress_callback,{status:"initiate",name:y,file:n});let c;if(!c&&env.useBrowserCache){if(typeof caches>"u")throw Error("Browser cache is not available in this environment.");try{c=await caches.open("transformers-cache")}catch(d){console.warn("An error occurred while opening the browser cache:",d)}}!c&&env.useFSCache&&(c=new FileCache(u.cache_dir??env.cacheDir));const p=u.revision??"main";let s=pathJoin(y,n),h=pathJoin(env.localModelPath,s),f=pathJoin(env.remoteHost,env.remotePathTemplate.replaceAll("{model}",y).replaceAll("{revision}",p),n),l=p==="main"?s:pathJoin(y,p,n),o,t=c instanceof FileCache?l:f,e,r;if(c&&(r=await tryCache(c,h,t)),r===void 0){let d=isValidHttpUrl(s);if(env.allowLocalModels)if(d){if(u.local_files_only)throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${s}.`)}else try{r=await getFile(h),o=h}catch(g){console.warn(`Unable to load from local path "${h}": "${g}"`)}if(r===void 0||r.status===404){if(u.local_files_only||!env.allowRemoteModels){if(a)throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${h}".`);return null}if(r=await getFile(f),r.status!==200)return handleError(r.status,f,a);o=t}c&&r instanceof Response&&r.status===200&&(e=r.clone())}dispatchCallback(u.progress_callback,{status:"download",name:y,file:n});const i=await readResponse(r,d=>{dispatchCallback(u.progress_callback,{status:"progress",...d,name:y,file:n})});return e&&o&&await c.match(o)===void 0&&await c.put(o,e).catch(d=>{console.warn(`Unable to add response to browser cache: ${d}.`)}),dispatchCallback(u.progress_callback,{status:"done",name:y,file:n}),i}async function getModelJSON(y,n,a=!0,u={}){let c=await getModelFile(y,n,a,u);if(c===null)return{};let s=new TextDecoder("utf-8").decode(c);return JSON.parse(s)}async function readResponse(y,n){const a=y.headers.get("Content-Length");a===null&&console.warn("Unable to determine content-length from response headers. 
Will expand buffer when needed.");let u=parseInt(a??"0"),c=new Uint8Array(u),p=0;const s=y.body.getReader();async function h(){const{done:f,value:l}=await s.read();if(f)return;let o=p+l.length;if(o>u){u=o;let e=new Uint8Array(u);e.set(c),c=e}c.set(l,p),p=o;const t=p/u*100;return n({progress:t,loaded:p,total:u}),h()}return await h(),c}function pathJoin(...y){return y=y.map((n,a)=>(a&&(n=n.replace(new RegExp("^/"),"")),a!==y.length-1&&(n=n.replace(new RegExp("/$"),"")),n)),y.join("/")}function interpolate_data(y,[n,a,u],[c,p],s="bilinear",h=!1){const f=p/u,l=c/a,o=new y.constructor(c*p*n),t=a*u,e=c*p;for(let r=0;r=0;--h)c[h]=f,u[h]=n[a[h]],f*=u[h];const p=a.map((h,f)=>c[a.indexOf(f)]),s=new y.constructor(y.length);for(let h=0;h=0;--l)f+=o%n[l]*p[l],o=Math.floor(o/n[l]);s[f]=y[h]}return[s,u]}function softmax(y){const n=max(y)[0],a=y.map(p=>Math.exp(p-n)),u=a.reduce((p,s)=>p+s,0);return a.map(p=>p/u)}function log_softmax(y){return softmax(y).map(u=>Math.log(u))}function getTopItems(y,n=0){return y=Array.from(y).map((a,u)=>[u,a]).sort((a,u)=>u[1]-a[1]),n>0&&(y=y.slice(0,n)),y}function min(y){if(y.length===0)throw Error("Array must not be empty");let n=y[0],a=0;for(let u=1;un&&(n=y[u],a=u);return[n,a]}function rfftfreq(y,n=1){if(!Number.isInteger(y))throw new TypeError(`n should be an integer, but ${y} given.`);const a=1/(y*n),u=Math.floor(y/2)+1,c=new Array(u);for(let p=0;pu;u<<=1)++a;this._width=a%2===0?a-1:a,this._bitrev=new Int32Array(1<>>c&3)<>>1);for(let c=0;c>>1]=n[c];return u}toComplexArray(n,a){const u=a||this.createComplexArray();for(let c=0;c>>1],u[c+1]=0;return u}completeSpectrum(n){const a=this._csize,u=a>>>1;for(let c=2;c>=2;s>=2;s>>=2){h=c/s<<1;let t=h>>>2;for(f=0;f>>1,s>>>1)}else for(f=0,l=0;f>>1,s>>>1,u)}for(s>>=2;s>=2;s>>=2){h=c/s<<1;const t=h>>>1,e=t>>>1,r=e>>>1;for(f=0;f{if(typeof u=="string"){let c=Number(u);if(Number.isInteger(c))return a._getitem(c)}return a[u]},set:(a,u,c)=>a[u]=c})}*[Symbol.iterator](){const[n,...a]=this.dims;if(a.length>0){const u=a.reduce((c,p)=>c*p);for(let c=0;c=a||n<-a)throw new Error(`Index ${n} is out of bounds for dimension 0 with size ${a}`);if(n<0&&(n+=a),u.length>0){const c=u.reduce((p,s)=>p*s);return this._subarray(n,c,u)}else return new Tensor(this.type,[this.data[n]],u)}indexOf(n){for(let a=0;a=this.dims[f])throw new Error(`IndexError: index ${l} is out of bounds for dimension ${f} with size ${this.dims[f]}`);l<0&&(l+=this.dims[f]),u.push([l,l+1])}else if(Array.isArray(l)&&l.length===2){if(l[0]>l[1])throw new Error(`Invalid slice: ${l}`);let o=[Math.max(l[0],0),Math.min(l[1],this.dims[f])];u.push(o),a.push(o[1]-o[0])}else throw new Error(`Invalid slice: ${l}`)}let c=u.map(([f,l])=>l-f),p=c.reduce((f,l)=>f*l),s=new this.data.constructor(p);const h=new Array(this.dims.length);for(let f=c.length-1,l=1;f>=0;--f)h[f]=l,l*=this.dims[f];for(let f=0;f=0;--o){const e=c[o];l+=(t%e+u[o][0])*h[o],t=Math.floor(t/e)}s[f]=this.data[l]}return new Tensor(this.type,s,a)}transpose(...n){return transpose(this,n)}sum(n=null,a=!1){return this.norm(1,n,a)}norm(n="fro",a=null,u=!1){if(n==="fro")n=2;else if(typeof n=="string")throw Error(`Unsupported norm: ${n}`);if(a===null){let s=this.data.reduce((h,f)=>h+f**n,0)**(1/n);return new Tensor(this.type,[s],[1])}a<0&&(a+=this.dims.length);const c=this.dims.slice();c[a]=1;const p=new this.data.constructor(this.data.length/this.dims[a]);for(let s=0;s=0;--f){const t=this.dims[f];if(f!==a){const e=l%t;h+=e*o,o*=c[f]}l=Math.floor(l/t)}p[h]+=this.data[s]**n}if(n!==1)for(let s=0;s=0;--s){const l=this.dims[s];if(s!==a){const 
o=h%l;p+=o*f,f*=this.dims[s]}h=Math.floor(h/l)}this.data[c]/=u.data[p]}return this}normalize(n=2,a=1){return this.clone().normalize_(n,a)}stride(){const n=new Array(this.dims.length);for(let a=this.dims.length-1,u=1;a>=0;--a)n[a]=u,u*=this.dims[a];return n}squeeze(n=null){return new Tensor(this.type,this.data,calc_squeeze_dims(this.dims,n))}squeeze_(n=null){return this.dims=calc_squeeze_dims(this.dims,n),this}unsqueeze(n=null){return new Tensor(this.type,this.data,calc_unsqueeze_dims(this.dims,n))}unsqueeze_(n=null){return this.dims=calc_unsqueeze_dims(this.dims,n),this}flatten_(n=0,a=-1){a=(a+this.dims.length)%this.dims.length;let u=this.dims.slice(0,n),c=this.dims.slice(n,a+1),p=this.dims.slice(a+1);return this.dims=[...u,c.reduce((s,h)=>s*h,1),...p],this}flatten(n=0,a=-1){return this.clone().flatten_(n,a)}view(...n){let a=-1;for(let u=0;us!==a?c*p:c,1);n[a]=this.data.length/u}return new Tensor(this.type,this.data,n)}}function reshape(y,n){const a=y.length,u=n.reduce((p,s)=>p*s);if(a!==u)throw Error(`cannot reshape array of size ${a} into shape (${n})`);let c=y;for(let p=n.length-1;p>=0;p--)c=c.reduce((s,h)=>{let f=s[s.length-1];return f.lengtha!==1):typeof n=="number"?y[n]===1&&y.splice(n,1):Array.isArray(n)&&(y=y.filter((a,u)=>a!==1||!n.includes(u))),y}function calc_unsqueeze_dims(y,n){return y=y.slice(),n<0&&(n=(n%y.length+y.length)%y.length),y.splice(n,0,1),y}async function loadTokenizer(y,n){return await Promise.all([getModelJSON(y,"tokenizer.json",!0,n),getModelJSON(y,"tokenizer_config.json",!0,n)])}function createPattern(y,n=!0){return y.Regex?new RegExp(n?y.Regex:`(${y.Regex})`,"gu"):y.String?y.String:(console.warn("Unknown pattern type:",y),null)}function clean_up_tokenization(y){return y.replace(/ \./g,".").replace(/ \?/g,"?").replace(/ \!/g,"!").replace(/ ,/g,",").replace(/ \' /g,"'").replace(/ n\'t/g,"n't").replace(/ \'m/g,"'m").replace(/ \'s/g,"'s").replace(/ \'ve/g,"'ve").replace(/ \'re/g,"'re")}function fuse(y,n){let a=[],u=0;for(;uthis.tokens_to_ids.get(u)??this.unk_token_id);return this.fuse_unk&&(a=fuse(a,this.unk_token_id)),a}convert_ids_to_tokens(n){return n.map(a=>this.vocab[a]??this.unk_token)}}class WordPieceTokenizer extends TokenizerModel{constructor(n){super(n),this.tokens_to_ids=n.vocab,this.unk_token_id=this.tokens_to_ids.get(n.unk_token),this.unk_token=n.unk_token,this.vocab=new Array(this.tokens_to_ids.size);for(const[a,u]of this.tokens_to_ids)this.vocab[u]=a}encode(n){let a=[];for(let u of n){let c=[...u],p=!1,s=0,h=[];for(;s0&&(o=this.config.continuing_subword_prefix+o),this.tokens_to_ids.has(o)){l=o;break}--f}if(l===null){p=!0;break}h.push(l),s=f}p?a.push(this.unk_token):a.push(...h)}return a}}class Unigram extends TokenizerModel{constructor(n,a){super(n),this.vocab=new Array(n.vocab.size),this.scores=new Array(n.vocab.size);let u=0;n.vocab.forEach((c,p)=>{this.vocab[u]=p,this.scores[u]=c,++u}),this.unk_token_id=n.unk_id,this.unk_token=this.vocab[n.unk_id],this.tokens_to_ids=new Map(this.vocab.map((c,p)=>[c,p])),this.bosToken=" ",this.bosTokenId=this.tokens_to_ids.get(this.bosToken),this.eosToken=a.eos_token,this.eosTokenId=this.tokens_to_ids.get(this.eosToken),this.unkToken=this.vocab[this.unk_token_id],this.minScore=min(this.scores)[0],this.unkScore=this.minScore-10,this.scores[this.unk_token_id]=this.unkScore,this.trie=new CharTrie,this.trie.extend(this.vocab),this.fuse_unk=!0}populateNodes(n){const a=n.sentence,u=a.length;let c=0;for(;c{const 
y=[...Array.from({length:"~".charCodeAt(0)-"!".charCodeAt(0)+1},(c,p)=>p+"!".charCodeAt(0)),...Array.from({length:"¬".charCodeAt(0)-"¡".charCodeAt(0)+1},(c,p)=>p+"¡".charCodeAt(0)),...Array.from({length:"ÿ".charCodeAt(0)-"®".charCodeAt(0)+1},(c,p)=>p+"®".charCodeAt(0))];let n=y.slice(),a=0;for(let c=0;c<256;++c)y.includes(c)||(y.push(c),n.push(256+a),a+=1);let u=n.map(c=>String.fromCharCode(c));return Object.fromEntries(y.map((c,p)=>[c,u[p]]))})(),UNICODE_TO_BYTES=reverseDictionary(BYTES_TO_UNICODE);class BPE extends TokenizerModel{constructor(n){super(n),this.tokens_to_ids=n.vocab,this.unk_token_id=this.tokens_to_ids.get(n.unk_token),this.unk_token=n.unk_token,this.vocab=new Array(this.tokens_to_ids.size);for(const[a,u]of this.tokens_to_ids)this.vocab[u]=a;this.bpe_ranks=Object.fromEntries(n.merges.map((a,u)=>[a,u])),this.merges=n.merges.map(a=>a.split(/\s+/)),this.end_of_word_suffix=n.end_of_word_suffix,this.byte_fallback=this.config.byte_fallback??!1,this.byte_fallback&&(this.text_encoder=new TextEncoder),this.cache=Object.create(null),this.fuse_unk??(this.fuse_unk=this.config.fuse_unk)}get_pairs(n){let a=new Set,u=n[0];for(let c=1;c{let r=this.bpe_ranks[t]??1/0,i=this.bpe_ranks[e]??1/0;return r<=i?t:e});if(!(p in this.bpe_ranks))break;let[s,h]=p.split(/\s+/g),f=[],l=0,o=-1;for(;l`<0x${s.toString(16).toUpperCase().padStart(2,"0")}>`)):a.push(this.unk_token)}return a}}class Normalizer extends Callable{constructor(n){super(),this.config=n}static fromConfig(n){if(n===null)return null;switch(n.type){case"BertNormalizer":return new BertNormalizer(n);case"Precompiled":return new Precompiled(n);case"Sequence":return new NormalizerSequence(n);case"Replace":return new Replace(n);case"NFC":return new NFC(n);case"NFKD":return new NFKD(n);case"StripAccents":return new StripAccents(n);case"Lowercase":return new Lowercase(n);case"Prepend":return new Prepend(n);default:throw new Error(`Unknown Normalizer type: ${n.type}`)}}normalize(n){throw Error("normalize should be implemented in subclass.")}_call(n){return this.normalize(n)}}class Replace extends Normalizer{normalize(n){let a=createPattern(this.config.pattern);return a===null||(n=n.replaceAll(a,this.config.content)),n}}class NFC extends Normalizer{normalize(n){return n=n.normalize("NFC"),n}}class NFKD extends Normalizer{normalize(n){return n=n.normalize("NFKD"),n}}class StripAccents extends Normalizer{normalize(n){return n=n.replace(/[\u0300-\u036f]/g,""),n}}class Lowercase extends Normalizer{normalize(n){return n=n.toLowerCase(),n}}class Prepend extends Normalizer{normalize(n){return n=this.config.prepend+n,n}}class NormalizerSequence extends Normalizer{constructor(n){super(n),this.normalizers=n.normalizers.map(a=>Normalizer.fromConfig(a))}normalize(n){return this.normalizers.reduce((a,u)=>u.normalize(a),n)}}class BertNormalizer extends Normalizer{_tokenize_chinese_chars(n){let a=[];for(let u=0;u=19968&&n<=40959||n>=13312&&n<=19903||n>=131072&&n<=173791||n>=173824&&n<=177983||n>=177984&&n<=178207||n>=178208&&n<=183983||n>=63744&&n<=64255||n>=194560&&n<=195103}stripAccents(n){return n.normalize("NFD").replace(/[\u0300-\u036f]/g,"")}normalize(n){return this.config.handle_chinese_chars&&(n=this._tokenize_chinese_chars(n)),this.config.lowercase?(n=n.toLowerCase(),this.config.strip_accents!==!1&&(n=this.stripAccents(n))):this.config.strip_accents&&(n=this.stripAccents(n)),n}}class PreTokenizer extends Callable{static fromConfig(n){if(n===null)return null;switch(n.type){case"BertPreTokenizer":return new BertPreTokenizer(n);case"Sequence":return new 
PreTokenizerSequence(n);case"WhitespaceSplit":return new WhitespaceSplit(n);case"Metaspace":return new MetaspacePreTokenizer(n);case"ByteLevel":return new ByteLevelPreTokenizer(n);case"Split":return new SplitPreTokenizer(n);case"Punctuation":return new PunctuationPreTokenizer(n);case"Digits":return new DigitsPreTokenizer(n);default:throw new Error(`Unknown PreTokenizer type: ${n.type}`)}}pre_tokenize_text(n){throw Error("pre_tokenize_text should be implemented in subclass.")}pre_tokenize(n){let a=[];return Array.isArray(n)?a=n.map(u=>this.pre_tokenize_text(u)):a=this.pre_tokenize_text(n),a.flat()}_call(n){return this.pre_tokenize(n)}}class BertPreTokenizer extends PreTokenizer{constructor(n){super(),this.pattern=new RegExp(`[^\\s${PUNCTUATION_REGEX}]+|[${PUNCTUATION_REGEX}]`,"gu")}pre_tokenize_text(n){return n.trim().match(this.pattern)||[]}}class ByteLevelPreTokenizer extends PreTokenizer{constructor(n){super(),this.config=n,this.add_prefix_space=this.config.add_prefix_space,this.trim_offsets=this.config.trim_offsets,this.use_regex=this.config.use_regex??!0,this.pattern=/'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+/gu,this.byte_encoder=BYTES_TO_UNICODE,this.text_encoder=new TextEncoder}pre_tokenize_text(n){return(this.use_regex?n.match(this.pattern)||[]:[n]).map(u=>(this.add_prefix_space&&!u.startsWith(" ")&&(u=" "+u),u=Array.from(this.text_encoder.encode(u),c=>this.byte_encoder[c]).join(""),u))}}class SplitPreTokenizer extends PreTokenizer{constructor(n){super(),this.config=n,this.pattern=createPattern(this.config.pattern,this.config.invert)}pre_tokenize_text(n){return this.pattern===null?[]:this.config.invert?n.match(this.pattern)||[]:n.split(this.pattern).filter(a=>a)}}class PunctuationPreTokenizer extends PreTokenizer{constructor(n){super(),this.config=n,this.pattern=new RegExp(`[^${PUNCTUATION_REGEX}]+|[${PUNCTUATION_REGEX}]+`,"gu")}pre_tokenize_text(n){return n.match(this.pattern)||[]}}class DigitsPreTokenizer extends PreTokenizer{constructor(n){super(),this.config=n;const a=`[^\\d]+|\\d${this.config.individual_digits?"":"+"}`;this.pattern=new RegExp(a,"gu")}pre_tokenize_text(n){return n.match(this.pattern)||[]}}class PostProcessor extends Callable{constructor(n){super(),this.config=n}static fromConfig(n){if(n===null)return null;switch(n.type){case"TemplateProcessing":return new TemplateProcessing(n);case"ByteLevel":return new ByteLevelPostProcessor(n);case"RobertaProcessing":return new RobertaProcessing(n);default:throw new Error(`Unknown PostProcessor type: ${n.type}`)}}post_process(n,...a){throw Error("post_process should be implemented in subclass.")}_call(n,...a){return this.post_process(n,...a)}}class RobertaProcessing extends PostProcessor{constructor(n){super(n),this.cls=n.cls[0],this.sep=n.sep[0]}post_process(n,a=null){return n=mergeArrays([this.cls],n,[this.sep]),a!==null&&(n=mergeArrays(n,[this.sep],a,[this.sep])),n}}class TemplateProcessing extends PostProcessor{constructor(n){super(n),this.single=n.single,this.pair=n.pair}post_process(n,a=null){let u=a===null?this.single:this.pair,c=[];for(let p of u)"SpecialToken"in p?c.push(p.SpecialToken.id):"Sequence"in p&&(p.Sequence.id==="A"?c=mergeArrays(c,n):p.Sequence.id==="B"&&(c=mergeArrays(c,a)));return c}}class ByteLevelPostProcessor extends PostProcessor{post_process(n){return n}}class Decoder extends Callable{constructor(n){super(),this.config=n,this.added_tokens=[],this.end_of_word_suffix=null,this.trim_offsets=n.trim_offsets}static fromConfig(n){switch(n.type){case"WordPiece":return new 
WordPieceDecoder(n);case"Metaspace":return new MetaspaceDecoder(n);case"ByteLevel":return new ByteLevelDecoder(n);case"Replace":return new ReplaceDecoder(n);case"ByteFallback":return new ByteFallback(n);case"Fuse":return new FuseDecoder(n);case"Strip":return new StripDecoder(n);case"Sequence":return new DecoderSequence(n);default:throw new Error(`Unknown Decoder type: ${n.type}`)}}_call(n){return this.decode(n)}decode(n){return this.decode_chain(n).join("")}decode_chain(n){throw Error("`decode_chain` should be implemented in subclass.")}}class ReplaceDecoder extends Decoder{constructor(n){super(n)}decode_chain(n){let a=createPattern(this.config.pattern);return a===null?n:n.map(u=>u.replaceAll(a,this.config.content))}}class ByteFallback extends Decoder{constructor(n){super(n),this.text_decoder=new TextDecoder}decode_chain(n){let a=[],u=[];for(let c of n){let p=null;if(c.length===6&&c.startsWith("<0x")&&c.endsWith(">")){let s=parseInt(c.slice(3,5),16);isNaN(s)||(p=s)}if(p!==null)u.push(p);else{if(u.length>0){let s=this.text_decoder.decode(Uint8Array.from(u));a.push(s),u=[]}a.push(c)}}if(u.length>0){let c=this.text_decoder.decode(Uint8Array.from(u));a.push(c),u=[]}return a}}class FuseDecoder extends Decoder{constructor(n){super(n)}decode_chain(n){return[n.join("")]}}class StripDecoder extends Decoder{constructor(n){super(n),this.content=this.config.content,this.start=this.config.start,this.stop=this.config.stop}decode_chain(n){return n.map(a=>{let u=0;for(let p=0;p(u!==0&&(a.startsWith(this.config.prefix)?a=a.replace(this.config.prefix,""):a=" "+a),this.cleanup&&(a=clean_up_tokenization(a)),a))}}class ByteLevelDecoder extends Decoder{constructor(n){super(n),this.byte_decoder=UNICODE_TO_BYTES,this.text_decoder=new TextDecoder("utf-8",{fatal:!1,ignoreBOM:!0}),this.end_of_word_suffix=null}convert_tokens_to_string(n){let a=n.join(""),u=new Uint8Array([...a].map(p=>this.byte_decoder[p]));return this.text_decoder.decode(u)}decode_chain(n){let a=[],u=[];for(let c of n)this.added_tokens.includes(c)?(u.length>0&&(a.push(this.convert_tokens_to_string(u)),u=[]),a.push(c)):u.push(c);return u.length>0&&a.push(this.convert_tokens_to_string(u)),a}}class DecoderSequence extends Decoder{constructor(n){super(n),this.decoders=n.decoders.map(a=>Decoder.fromConfig(a))}decode_chain(n){return this.decoders.reduce((a,u)=>u.decode_chain(a),n)}}class MetaspacePreTokenizer extends PreTokenizer{constructor(n){super(),this.addPrefixSpace=n.add_prefix_space,this.replacement=n.replacement,this.strRep=n.str_rep||this.replacement}pre_tokenize(n){typeof n=="string"&&(n=n.trimStart().split(/\s+/));const a=[];for(let u of n){let c=u.replaceAll(" ",this.strRep);this.addPrefixSpace&&!c.startsWith(this.replacement)&&(c=this.strRep+c),a.push(c)}return a}}class MetaspaceDecoder extends Decoder{constructor(n){super(n),this.addPrefixSpace=n.add_prefix_space,this.replacement=n.replacement}decode_chain(n){let a=[];for(let u=0;uPreTokenizer.fromConfig(a))}pre_tokenize_text(n){return typeof n=="string"&&(n=[n]),this.tokenizers.reduce((a,u)=>u.pre_tokenize(a),n)}}class WhitespaceSplit extends PreTokenizer{constructor(n){super()}pre_tokenize_text(n){return whitespace_split(n)}}class PreTrainedTokenizer extends Callable{constructor(n,a){super(),this.normalizer=Normalizer.fromConfig(n.normalizer),this.pre_tokenizer=PreTokenizer.fromConfig(n.pre_tokenizer),n.model.vocab&&(Array.isArray(n.model.vocab)||(n.model.vocab=Object.entries(n.model.vocab)),n.model.vocab=new 
Map(n.model.vocab)),this.model=TokenizerModel.fromConfig(n.model,a),this.post_processor=PostProcessor.fromConfig(n.post_processor),this.decoder=Decoder.fromConfig(n.decoder),this.decoder.end_of_word_suffix=this.model.end_of_word_suffix,this.special_tokens=[],this.all_special_ids=[],this.added_tokens=[];for(let u of n.added_tokens){let c=u.id,p=u.content;this.added_tokens.push(p),this.model.tokens_to_ids.set(p,c),this.model.vocab[c]=p,u.special&&(this.special_tokens.push(p),this.all_special_ids.push(c))}this.decoder.added_tokens=this.added_tokens,this.added_tokens_regex=new RegExp("("+this.added_tokens.map(escapeRegExp).join("|")+")"),this.mask_token=this.getToken(a,"mask_token"),this.mask_token_id=this.model.tokens_to_ids.get(this.mask_token),this.pad_token=this.getToken(a,"pad_token","eos_token"),this.pad_token_id=this.model.tokens_to_ids.get(this.pad_token),this.sep_token=this.getToken(a,"sep_token"),this.sep_token_id=this.model.tokens_to_ids.get(this.sep_token),this.model_max_length=a.model_max_length,this.remove_space=a.remove_space,this.clean_up_tokenization_spaces=a.clean_up_tokenization_spaces??!0,this.padding_side="right"}getToken(n,...a){for(let u of a){let c=n[u];if(c)if(typeof c=="object"){if(c.__type==="AddedToken")return c.content;throw Error(`Unknown token: ${c}`)}else return c}return null}static async from_pretrained(n,{progress_callback:a=null,config:u=null,cache_dir:c=null,local_files_only:p=!1,revision:s="main"}={}){let h=await loadTokenizer(n,{progress_callback:a,config:u,cache_dir:c,local_files_only:p,revision:s});return new this(...h)}prepare_model_inputs(n){return n}_call(n,{text_pair:a=null,padding:u=!1,truncation:c=null,max_length:p=null,return_tensor:s=!0}={}){let h;if(Array.isArray(n)){if(n.length===0)throw Error("text array must be non-empty");if(a!==null){if(Array.isArray(a)){if(n.length!==a.length)throw Error("text and text_pair must have the same length")}else throw Error("text_pair must also be an array");h=n.map((t,e)=>this.encode(t,a[e]))}else h=n.map(t=>this.encode(t))}else{if(n===null)throw Error("text may not be null");if(Array.isArray(a))throw Error("When specifying `text_pair`, since `text` is a string, `text_pair` must also be a string (i.e., not an array).");h=[this.encode(n,a)]}let f=max(h.map(t=>t.length))[0];p===null&&(p=f),p=Math.min(p,this.model_max_length);let l=[];if(u||c)for(let t=0;tp)c&&(h[t]=h[t].slice(0,p)),l.push(new Array(h[t].length).fill(1));else if(u){let e=p-h[t].length;this.padding_side==="right"?(l.push(new Array(h[t].length).fill(1).concat(new Array(e).fill(0))),h[t].push(...new Array(e).fill(this.pad_token_id))):(l.push(new Array(e).fill(0).concat(new Array(h[t].length).fill(1))),h[t].unshift(...new Array(e).fill(this.pad_token_id)))}else l.push(new Array(h[t].length).fill(1));else l=h.map(t=>new Array(t.length).fill(1));if(s){if(!(u&&c)&&h.some(e=>e.length!==h[0].length))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=true' and 'truncation=true' to have batched tensors with the same length.");let t=[h.length,h[0].length];h=new Tensor("int64",BigInt64Array.from(h.flat().map(BigInt)),t),l=new Tensor("int64",BigInt64Array.from(l.flat().map(BigInt)),t)}else Array.isArray(n)||(h=h[0],l=l[0]);let o={input_ids:h,attention_mask:l};return o=this.prepare_model_inputs(o),o}_encode_text(n){return n===null?null:n.split(this.added_tokens_regex).filter(c=>c).map(c=>{if(this.added_tokens.includes(c))return c;{this.remove_space===!0&&(c=c.trim().split(/\s+/).join(" 
")),this.normalizer!==null&&(c=this.normalizer(c));let p=this.pre_tokenizer!==null?this.pre_tokenizer(c):[c];return this.model(p)}}).flat()}encode(n,a=null){let u=this._encode_text(n),c=this._encode_text(a),p=this.post_processor!==null?this.post_processor(u,c):mergeArrays(u??[],c??[]);return this.model.convert_tokens_to_ids(p)}batch_decode(n,a={}){return n.map(u=>this.decode(u,a))}decode(n,a={}){if(!Array.isArray(n)||n.length===0||!isIntegralNumber(n[0]))throw Error("token_ids must be a non-empty array of integers.");return this.decode_single(n,a)}decode_single(n,{skip_special_tokens:a=!1,clean_up_tokenization_spaces:u=null}){let c=this.model.convert_ids_to_tokens(n);a&&(c=c.filter(s=>!this.special_tokens.includes(s)));let p=this.decoder(c);return this.decoder.end_of_word_suffix&&(p=p.replaceAll(this.decoder.end_of_word_suffix," "),a&&(p=p.trim())),(u??this.clean_up_tokenization_spaces)&&(p=clean_up_tokenization(p)),p}}function add_token_types(y){if(y.input_ids instanceof Tensor)y.token_type_ids=new Tensor("int64",new BigInt64Array(y.input_ids.data.length),y.input_ids.dims);else if(Array.isArray(y.input_ids))Array.isArray(y.input_ids[0])?y.token_type_ids=y.input_ids.map(n=>new Array(n.length).fill(0)):y.token_type_ids=new Array(y.input_ids.length).fill(0);else throw new Error("Input ids must be a Tensor or an Array");return y}class BertTokenizer extends PreTrainedTokenizer{prepare_model_inputs(n){return add_token_types(n)}}class AlbertTokenizer extends PreTrainedTokenizer{prepare_model_inputs(n){return add_token_types(n)}}class MobileBertTokenizer extends PreTrainedTokenizer{prepare_model_inputs(n){return add_token_types(n)}}class SqueezeBertTokenizer extends PreTrainedTokenizer{prepare_model_inputs(n){return add_token_types(n)}}class DistilBertTokenizer extends PreTrainedTokenizer{}class T5Tokenizer extends PreTrainedTokenizer{}class GPT2Tokenizer extends PreTrainedTokenizer{}class BartTokenizer extends PreTrainedTokenizer{}class RobertaTokenizer extends PreTrainedTokenizer{}class BloomTokenizer extends PreTrainedTokenizer{}class LlamaTokenizer extends PreTrainedTokenizer{}class XLMRobertaTokenizer extends PreTrainedTokenizer{}class MPNetTokenizer extends PreTrainedTokenizer{}class FalconTokenizer extends PreTrainedTokenizer{prepare_model_inputs(n){return add_token_types(n)}}class GPTNeoXTokenizer extends PreTrainedTokenizer{}class NllbTokenizer extends PreTrainedTokenizer{constructor(n,a){super(n,a),this.languageRegex=/^[a-z]{3}_[A-Z][a-z]{3}$/,this.language_codes=this.special_tokens.filter(u=>this.languageRegex.test(u))}_build_translation_inputs(n,a,u){if(!this.language_codes.includes(u.tgt_lang))throw new Error(`Target language code "${u.tgt_lang}" is not valid. Must be one of: {${this.language_codes.join(", ")}}`);if(u.src_lang!==void 0){if(!this.language_codes.includes(u.src_lang))throw new Error(`Source language code "${u.src_lang}" is not valid. 
Must be one of: {${this.language_codes.join(", ")}}`);for(let c of this.post_processor.config.single)if("SpecialToken"in c&&this.languageRegex.test(c.SpecialToken.id)){c.SpecialToken.id=u.src_lang;break}}return u.forced_bos_token_id=this.model.convert_tokens_to_ids([u.tgt_lang])[0],this._call(n,a)}}const WHISPER_LANGUAGES=[["en","english"],["zh","chinese"],["de","german"],["es","spanish"],["ru","russian"],["ko","korean"],["fr","french"],["ja","japanese"],["pt","portuguese"],["tr","turkish"],["pl","polish"],["ca","catalan"],["nl","dutch"],["ar","arabic"],["sv","swedish"],["it","italian"],["id","indonesian"],["hi","hindi"],["fi","finnish"],["vi","vietnamese"],["he","hebrew"],["uk","ukrainian"],["el","greek"],["ms","malay"],["cs","czech"],["ro","romanian"],["da","danish"],["hu","hungarian"],["ta","tamil"],["no","norwegian"],["th","thai"],["ur","urdu"],["hr","croatian"],["bg","bulgarian"],["lt","lithuanian"],["la","latin"],["mi","maori"],["ml","malayalam"],["cy","welsh"],["sk","slovak"],["te","telugu"],["fa","persian"],["lv","latvian"],["bn","bengali"],["sr","serbian"],["az","azerbaijani"],["sl","slovenian"],["kn","kannada"],["et","estonian"],["mk","macedonian"],["br","breton"],["eu","basque"],["is","icelandic"],["hy","armenian"],["ne","nepali"],["mn","mongolian"],["bs","bosnian"],["kk","kazakh"],["sq","albanian"],["sw","swahili"],["gl","galician"],["mr","marathi"],["pa","punjabi"],["si","sinhala"],["km","khmer"],["sn","shona"],["yo","yoruba"],["so","somali"],["af","afrikaans"],["oc","occitan"],["ka","georgian"],["be","belarusian"],["tg","tajik"],["sd","sindhi"],["gu","gujarati"],["am","amharic"],["yi","yiddish"],["lo","lao"],["uz","uzbek"],["fo","faroese"],["ht","haitian creole"],["ps","pashto"],["tk","turkmen"],["nn","nynorsk"],["mt","maltese"],["sa","sanskrit"],["lb","luxembourgish"],["my","myanmar"],["bo","tibetan"],["tl","tagalog"],["mg","malagasy"],["as","assamese"],["tt","tatar"],["haw","hawaiian"],["ln","lingala"],["ha","hausa"],["ba","bashkir"],["jw","javanese"],["su","sundanese"]],WHISPER_LANGUAGE_MAPPING=new Map(WHISPER_LANGUAGES),WHISPER_TO_LANGUAGE_CODE_MAPPING=new Map([...WHISPER_LANGUAGES.map(([y,n])=>[n,y]),["burmese","my"],["valencian","ca"],["flemish","nl"],["haitian","ht"],["letzeburgesch","lb"],["pushto","ps"],["panjabi","pa"],["moldavian","ro"],["moldovan","ro"],["sinhalese","si"],["castilian","es"]]);class WhisperTokenizer extends PreTrainedTokenizer{_decode_asr(n,{return_timestamps:a=!1,return_language:u=!1,time_precision:c=null,force_full_sequences:p=!0}={}){if(c===null)throw Error("Must specify time_precision");let s=null;function h(){return{language:s,timestamp:[null,null],text:""}}const f=[];let l=h(),o=0;const t=this.model.convert_tokens_to_ids(["<|notimestamps|>"])[0]+1;let e=[],r=!1,i=null;const d=new Set(this.all_special_ids);for(let b of n){const _=b.tokens;let v=null,w=t;if("stride"in b){const[A,O,x]=b.stride;if(o-=O,i=A-x,O&&(w=O/c+t),x)for(let I=_.length-1;I>=0;--I){const N=_[I];if(N>=t){if(v!==null&&(N-t)*c=t){const O=(A-t)*c+o,x=Math.round(O*100)/100;if(v!==null&&A>=v)r=!0;else if(r||e.length>0&&A0?e.push(S):e.every(A=>A.length===0)&&(l=h(),e=[],S=[])}if(e.length>0){if(p&&a)throw new Error("There was an error while processing timestamps, we haven't found a timestamp as last token.");const b=this.findLongestCommonSequence(e),_=this.decode(b);l.text=_,f.push(l)}let g=Object.create(null);const m=f.map(b=>b.text).join("");if(a||u){for(let 
b=0;bI===A[N]).length,x=O/g+m;O>1&&x>h&&(h=x,f=[b,_,w,S])}const[o,t,e,r]=f,i=Math.floor((t+o)/2),d=Math.floor((r+e)/2);c.push(...a.slice(0,i)),a=s.slice(d),u=a.length}return c.push(...a),c}get_decoder_prompt_ids({language:n=null,task:a=null,no_timestamps:u=!0}={}){let c=[];if(n){n=n.toLowerCase();let p=WHISPER_TO_LANGUAGE_CODE_MAPPING.get(n);if(p===void 0)if(WHISPER_LANGUAGE_MAPPING.has(n))p=n;else{const f=n.length===2?WHISPER_LANGUAGE_MAPPING.keys():WHISPER_LANGUAGE_MAPPING.values();throw new Error(`Language "${n}" is not supported. Must be one of: ${JSON.stringify(f)}`)}let s=this.model.tokens_to_ids.get(`<|${p}|>`);if(s===void 0)throw new Error(`Unable to find language "${p}" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.`);c.push(s)}else c.push(null);if(a){if(a=a.toLowerCase(),a!=="transcribe"&&a!=="translate")throw new Error(`Task "${a}" is not supported. Must be one of: ["transcribe", "translate"]`);let p=this.model.tokens_to_ids.get(`<|${a}|>`);if(p===void 0)throw new Error(`Unable to find task "${a}" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.`);c.push(p)}else c.push(null);if(u){let p=this.model.tokens_to_ids.get("<|notimestamps|>");if(p===void 0)throw new Error('Unable to find "<|notimestamps|>" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.');c.push(p)}return c.map((p,s)=>[s+1,p]).filter(p=>p[1]!==null)}}class CodeGenTokenizer extends PreTrainedTokenizer{}class CLIPTokenizer extends PreTrainedTokenizer{}class MarianTokenizer extends PreTrainedTokenizer{constructor(n,a){super(n,a),this.languageRegex=/^(>>\w+<<)\s*/g,this.supported_language_codes=this.model.vocab.filter(u=>this.languageRegex.test(u)),console.warn('WARNING: `MarianTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')}_encode_text(n){if(n===null)return null;let[a,...u]=n.trim().split(this.languageRegex);if(u.length===0)return super._encode_text(a);if(u.length===2){let[c,p]=u;return this.supported_language_codes.includes(c)||console.warn(`Unsupported language code "${c}" detected, which may lead to unexpected behavior. 
Should be one of: ${JSON.stringify(this.supported_language_codes)}`),mergeArrays([c],super._encode_text(p))}}}class CharTrie{constructor(){this.root=CharTrieNode.default()}extend(n){for(let a of n)this.push(a)}push(n){let a=this.root;for(let u of n){let c=a.children.get(u);c===void 0&&(c=CharTrieNode.default(),a.children.set(u,c)),a=c}a.isLeaf=!0}*commonPrefixSearch(n){let a=this.root,u="";for(let c=0;cf)&&(l=o.clone(),f=t)}if(l!==null)h.prev=l,h.backtraceScore=f;else return[]}++a}const u=[],p=this.beginNodes[n][0].prev;if(p===null)return[];let s=p.clone();for(;s.prev!==null;)u.push(s.clone()),s=s.clone().prev.clone();return u.reverse(),u}piece(n){return this.sentence.slice(n.pos,n.pos+n.length)}tokens(){return this.viterbi().map(a=>this.piece(a))}tokenIds(){return this.viterbi().map(a=>a.tokenId)}}class TokenLatticeNode{constructor(n,a,u,c,p){this.tokenId=n,this.nodeId=a,this.pos=u,this.length=c,this.score=p,this.prev=null,this.backtraceScore=0}clone(){const n=new TokenLatticeNode(this.tokenId,this.nodeId,this.pos,this.length,this.score);return n.prev=this.prev,n.backtraceScore=this.backtraceScore,n}}class AutoTokenizer{static async from_pretrained(n,{quantized:a=!0,progress_callback:u=null,config:c=null,cache_dir:p=null,local_files_only:s=!1,revision:h="main"}={}){let[f,l]=await loadTokenizer(n,{quantized:a,progress_callback:u,config:c,cache_dir:p,local_files_only:s,revision:h}),o=l.tokenizer_class.replace(/Fast$/,""),t=this.TOKENIZER_CLASS_MAPPING[o];return t||(console.warn(`Unknown tokenizer class "${o}", attempting to construct from base class.`),t=PreTrainedTokenizer),new t(f,l)}}Se(AutoTokenizer,"TOKENIZER_CLASS_MAPPING",{T5Tokenizer,DistilBertTokenizer,BertTokenizer,MobileBertTokenizer,SqueezeBertTokenizer,AlbertTokenizer,GPT2Tokenizer,BartTokenizer,RobertaTokenizer,WhisperTokenizer,CodeGenTokenizer,CLIPTokenizer,MarianTokenizer,BloomTokenizer,NllbTokenizer,LlamaTokenizer,XLMRobertaTokenizer,MPNetTokenizer,FalconTokenizer,GPTNeoXTokenizer,PreTrainedTokenizer});async function loadConfig(y,n){return await getModelJSON(y,"config.json",!0,n)}class PretrainedConfig{constructor(n){this.model_type=null,this.is_encoder_decoder=!1,Object.assign(this,n)}static async from_pretrained(n,{progress_callback:a=null,config:u=null,cache_dir:c=null,local_files_only:p=!1,revision:s="main"}={}){let h=u??await loadConfig(n,{progress_callback:a,config:u,cache_dir:c,local_files_only:p,revision:s});return new this(h)}}class AutoConfig{static async from_pretrained(...n){return PretrainedConfig.from_pretrained(...n)}}class LogitsProcessorList extends Callable{constructor(){super(),this.processors=[]}push(n){this.processors.push(n)}extend(n){this.processors.push(...n)}_call(n,a){for(let u of a)this.processors.forEach(c=>c(n,u))}[Symbol.iterator](){return this.processors.values()}}class LogitsProcessor extends Callable{_call(n,a){throw Error("`_call` should be implemented in a subclass")}}class ForceTokensLogitsProcessor extends LogitsProcessor{constructor(n){super(),this.force_token_map=Object.fromEntries(n??[])}_call(n,a){let u=this.force_token_map[n.length];return exists(u)&&(a.data.fill(-1/0),a.data[u]=0),a}}class ForcedBOSTokenLogitsProcessor extends LogitsProcessor{constructor(n){super(),this.bos_token_id=n}_call(n,a){return n.length===1&&(a.data.fill(-1/0),a.data[this.bos_token_id]=0),a}}class ForcedEOSTokenLogitsProcessor extends LogitsProcessor{constructor(n,a){super(),this.max_length=n,this.forced_eos_token_id=a}_call(n,a){}}class SuppressTokensAtBeginLogitsProcessor extends 
LogitsProcessor{constructor(n,a){super(),this.begin_suppress_tokens=n,this.begin_index=a}_call(n,a){if(n.length===this.begin_index)for(let u of this.begin_suppress_tokens)a.data[u]=-1/0;return a}}class WhisperTimeStampLogitsProcessor extends LogitsProcessor{constructor(n){super(),this.eos_token_id=n.eos_token_id,this.no_timestamps_token_id=n.no_timestamps_token_id,this.timestamp_begin=this.no_timestamps_token_id+1,this.begin_index=(n.forced_decoder_ids||[]).length+2,n.forced_decoder_ids.slice(-1)[0][1]===this.no_timestamps_token_id&&(this.begin_index-=1),this.max_initial_timestamp_index=n.max_initial_timestamp_index}_call(n,a){if(a.data[this.no_timestamps_token_id]=-1/0,n.length===this.begin_index-1)return a.data.fill(-1/0),a.data[this.timestamp_begin]=0,a;const u=n.slice(this.begin_index),c=u.length>=1&&u[u.length-1]>=this.timestamp_begin,p=u.length<2||u[u.length-2]>=this.timestamp_begin;if(c&&(p?a.data.subarray(this.timestamp_begin).fill(-1/0):a.data.subarray(0,this.eos_token_id).fill(-1/0)),n.length===this.begin_index&&this.max_initial_timestamp_index!==null){const l=this.timestamp_begin+this.max_initial_timestamp_index;a.data.subarray(l+1).fill(-1/0)}const s=log_softmax(a.data),h=Math.log(s.subarray(this.timestamp_begin).map(Math.exp).reduce((l,o)=>l+o)),f=max(s.subarray(0,this.timestamp_begin))[0];return h>f&&a.data.subarray(0,this.timestamp_begin).fill(-1/0),a}}class NoRepeatNGramLogitsProcessor extends LogitsProcessor{constructor(n){super(),this.no_repeat_ngram_size=n}getNgrams(n){const a=n.length,u=[];for(let p=0;p0&&(c=c.map(p=>p/this.generation_config.temperature)),c}randomSelect(n){let a=n.reduce((c,p)=>c+p,0),u=Math.random()*a;for(let c=0;c1)return new BeamSearchSampler(n);if(n.num_return_sequences>1)throw Error(`num_return_sequences has to be 1 when doing greedy search, but is ${n.num_return_sequences}.`);return new GreedySampler(n)}}class GreedySampler extends Sampler{sample(n,a=-1){let u=this.getLogits(n,a);return[[max(u)[1],0]]}}class MultinomialSampler extends Sampler{sample(n,a=-1){let u=n.dims.at(-1);this.generation_config.top_k>0&&(u=Math.min(this.generation_config.top_k,u));const c=this.getLogits(n,a),p=getTopItems(c,u),s=softmax(p.map(h=>h[1]));return Array.from({length:this.generation_config.num_beams},()=>{const h=this.randomSelect(s);return[p[h][0],Math.log(s[h])]})}}class BeamSearchSampler extends Sampler{sample(n,a=-1){let u=n.dims.at(-1);this.generation_config.top_k>0&&(u=Math.min(this.generation_config.top_k,u));const c=this.getLogits(n,a),p=getTopItems(c,u),s=softmax(p.map(h=>h[1]));return Array.from({length:this.generation_config.num_beams},(h,f)=>[p[f][0],Math.log(s[f])])}}const{InferenceSession,Tensor:ONNXTensor}=ONNX;class ModelType{}class EncoderOnlyModelType extends ModelType{}class EncoderDecoderModelType extends ModelType{}class Seq2SeqModelType extends EncoderDecoderModelType{}class DecoderOnlyModelType extends ModelType{}const MODEL_TYPE_MAPPING=new Map,MODEL_CLASS_MAPPING=new Map;async function forward(y,n){return MODEL_TYPE_MAPPING.get(y.constructor.name)===DecoderOnlyModelType?await decoderForward(y,n):await encoderForward(y,n)}async function constructSession(y,n,a){let u=`onnx/${n}${a.quantized?"_quantized":""}.onnx`,c=await getModelFile(y,u,!0,a);try{return await InferenceSession.create(c,{executionProviders})}catch(p){if(executionProviders.length===1&&executionProviders[0]==="wasm")throw p;return console.warn(p),console.warn("Something went wrong during model construction (most likely a missing operation). Using `wasm` as a fallback. 
"),await InferenceSession.create(c,{executionProviders:["wasm"]})}}async function validateInputs(y,n){const a={},u=[];for(let s of y.inputNames)n[s]===void 0?u.push(s):a[s]=n[s];if(u.length>0)throw new Error(`An error occurred during model execution: "Missing the following inputs: ${u.join(", ")}.`);const c=Object.keys(n).length,p=y.inputNames.length;if(c>p){let s=Object.keys(n).filter(h=>!y.inputNames.includes(h));console.warn(`WARNING: Too many inputs were provided (${c} > ${p}). The following inputs will be ignored: "${s.join(", ")}".`)}return a}async function sessionRun(y,n){const a=await validateInputs(y,n);try{let u=await y.run(a);return u=replaceTensors(u),u}catch(u){throw console.error(`An error occurred during model execution: "${u}".`),console.error("Inputs given to model:",a),u}}function replaceTensors(y){for(let n in y)y[n]instanceof ONNXTensor?y[n]=new Tensor(y[n]):typeof y[n]=="object"&&replaceTensors(y[n]);return y}function toI64Tensor(y){if(y instanceof Tensor)return y;if(y.length===0)throw Error("items must be non-empty");if(Array.isArray(y[0])){if(y.some(n=>n.length!==y[0].length))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.");return new Tensor("int64",BigInt64Array.from(y.flat().map(n=>BigInt(n))),[y.length,y[0].length])}else return new Tensor("int64",BigInt64Array.from(y.map(n=>BigInt(n))),[1,y.length])}function prepareAttentionMask(y,n){let a=y.config.pad_token_id??null,u=y.config.eos_token_id??null;isIntegralNumber(u)&&(u=[u]);let c=n.indexOf(a)!==-1,p=u===null||!u.includes(a);if(c&&p){let s=BigInt64Array.from(n.data.map(h=>h!=a));return new Tensor("int64",s,n.dims)}else return new Tensor("int64",new BigInt64Array(n.data.length).fill(1n),n.dims)}function boolTensor(y){return new Tensor("bool",[y],[1])}async function seq2seqForward(y,n,{add_decoder_pkv:a=!0}={}){let{encoder_outputs:u,past_key_values:c}=n;u||(u=(await encoderForward(y,n)).last_hidden_state);let p={input_ids:n.decoder_input_ids,encoder_hidden_states:u,use_cache_branch:boolTensor(c!==null)};y.decoder_merged_session.inputNames.includes("encoder_attention_mask")&&(p.encoder_attention_mask=n.attention_mask),y.addPastKeyValues(p,c,a);const s=await sessionRun(y.decoder_merged_session,p);let h=s.logits;return c=y.getPastKeyValues(s,c),new Seq2SeqLMOutput({logits:h,past_key_values:c,encoder_outputs:u})}function seq2seqStartBeams(y,n,a,u=!0){let c=[],p=0,s=y.config.decoder_start_token_id;Array.isArray(s)||(s=[s]);for(let h of n){h.dims=[1,...h.dims];let f={inputs:h,encoder_outputs:null,past_key_values:null,output_token_ids:s,done:!1,score:0,id:p++};u&&(f.attention_mask=prepareAttentionMask(y,h)),c.push(f)}return c}async function seq2seqRunBeam(y,n,{input_name:a="input_ids"}={}){let u={[a]:n.inputs,decoder_input_ids:toI64Tensor(n.output_token_ids.slice(-1)),encoder_outputs:n.encoder_outputs,past_key_values:n.past_key_values};n.attention_mask&&(u.attention_mask=n.attention_mask);let c=await y.forward(u);return n.past_key_values=c.past_key_values,n.encoder_outputs=c.encoder_outputs,c}async function encoderForward(y,n){let a={};for(let u of y.session.inputNames)a[u]=n[u];return await sessionRun(y.session,a)}async function decoderForward(y,n){let a=n.past_key_values,u={input_ids:n.input_ids,attention_mask:n.attention_mask??prepareAttentionMask(y,n.input_ids),use_cache_branch:boolTensor(a!==null)};y.addPastKeyValues(u,a);let c=await sessionRun(y.session,u),p=c.logits;return 
a=y.getPastKeyValues(c,a),{logits:p,past_key_values:a}}function decoderStartBeams(y,n,a,u){let c=[],p=0;for(let s of n){let h=s.tolist().map(Number);s.dims=[1,...s.dims];let f;u?(f=u[p],f.dims=[1,...f.dims]):f=prepareAttentionMask(y,s);let l={input:s,model_input_ids:s,attention_mask:f,past_key_values:null,output_token_ids:h,num_output_tokens:a,done:!1,score:0,id:p++};c.push(l)}return c}async function decoderRunBeam(y,n){let a=new BigInt64Array(n.output_token_ids.length).fill(1n),u={input_ids:n.model_input_ids,attention_mask:new Tensor("int64",a,[1,a.length]),past_key_values:n.past_key_values},c=await y.forward(u);return n.past_key_values=c.past_key_values,c}function decoderUpdatebeam(y,n){y.output_token_ids=[...y.output_token_ids,n],y.model_input_ids=new Tensor("int64",[BigInt(n)],[1,1])}class PreTrainedModel extends Callable{constructor(n,a){super(),this.config=n,this.session=a}async dispose(){let n=[];for(let a of Object.keys(this)){let u=this[a];u instanceof InferenceSession&&n.push(u.handler.dispose())}return await Promise.all(n)}static async from_pretrained(n,{quantized:a=!0,progress_callback:u=null,config:c=null,cache_dir:p=null,local_files_only:s=!1,revision:h="main"}={}){let f={quantized:a,progress_callback:u,config:c,cache_dir:p,local_files_only:s,revision:h},l=MODEL_TYPE_MAPPING.get(this.name),o;if(l===DecoderOnlyModelType)o=await Promise.all([AutoConfig.from_pretrained(n,f),constructSession(n,"decoder_model_merged",f)]);else if(l===Seq2SeqModelType)o=await Promise.all([AutoConfig.from_pretrained(n,f),constructSession(n,"encoder_model",f),constructSession(n,"decoder_model_merged",f),getModelJSON(n,"generation_config.json",!1,f)]);else if(l===EncoderDecoderModelType)o=await Promise.all([AutoConfig.from_pretrained(n,f),constructSession(n,"encoder_model",f),constructSession(n,"decoder_model_merged",f)]);else if(l===EncoderOnlyModelType)o=await Promise.all([AutoConfig.from_pretrained(n,f),constructSession(n,"model",f)]);else throw console.warn("Malformed class definition.",this),Error(`Unable to load model: ${n}. 
Please report this bug at https://github.com/xenova/transformers.js/issues/new/choose.`);return new this(...o)}async _call(n){return await this.forward(n)}async forward(n){return await forward(this,n)}_get_logits_processor(n,a,u=null){const c=new LogitsProcessorList;if(n.repetition_penalty!==null&&n.repetition_penalty!==1&&c.push(new RepetitionPenaltyLogitsProcessor(n.repetition_penalty)),n.no_repeat_ngram_size!==null&&n.no_repeat_ngram_size>0&&c.push(new NoRepeatNGramLogitsProcessor(n.no_repeat_ngram_size)),n.forced_bos_token_id!==null&&c.push(new ForcedBOSTokenLogitsProcessor(n.forced_bos_token_id)),n.forced_eos_token_id!==null&&c.push(new ForcedEOSTokenLogitsProcessor(n.max_length,n.forced_eos_token_id)),n.begin_suppress_tokens!==null){let p=a>1||n.forced_bos_token_id===null?a:a+1;n.forced_decoder_ids!==null&&(p+=n.forced_decoder_ids[n.forced_decoder_ids.length-1][0]),c.push(new SuppressTokensAtBeginLogitsProcessor(n.begin_suppress_tokens,p))}return n.forced_decoder_ids!==null&&c.push(new ForceTokensLogitsProcessor(n.forced_decoder_ids)),u!==null&&c.extend(u),c}_get_generation_config(n){let a=new GenerationConfig;return"generation_config"in this&&Object.assign(a,this.generation_config),n!==null&&Object.assign(a,n),a}async generate(n,a=null,u=null,{inputs_attention_mask:c=null}={}){if(!(n instanceof Tensor)&&!isTypedArray(n)&&!Array.isArray(n))throw Error(`\`inputs\` must be a Tensor, TypedArray, or Array, but is "${n.constructor.name}".`);let p;if(this.config.is_encoder_decoder)p=0;else if(p=n instanceof Tensor?n.dims[0]:n.length,p===0)throw Error("Must supply a non-empty array of input token ids.");a=this._get_generation_config(a),u=u??new LogitsProcessorList,u=this._get_logits_processor(a,p,u);let s=1;const h=s+(a.max_new_tokens??1/0),f=Number.isInteger(a.max_length)&&(a.max_new_tokens??null)===null;let l=Sampler.getSampler(a),o=this.getStartBeams(n,s,c);for(;o.some(t=>!t.done)&&s=a.max_length){e.done=!0,t.push(e);continue}let i=(await this.runBeam(e)).logits.slice(null,-1,null);u(e.output_token_ids,i);let d=l(i);for(let[g,m]of d){let b={...e};this.updateBeam(b,g),b.score+=m,g===this.config.eos_token_id&&(b.done=!0),t.push(b)}}++s,t=this.groupBeams(t).map(e=>e.sort((r,i)=>i.score-r.score).slice(0,a.num_beams)),o=t.flat(),a.callback_function&&a.callback_function(o)}return this.groupBeams(o).map(t=>a.num_return_sequences>1?t.slice(0,a.num_return_sequences).map(e=>e.output_token_ids):[t[0].output_token_ids]).flat()}groupBeams(n){const a=Object.create(null);for(const u of n)a[u.id]===void 0?a[u.id]=[u]:a[u.id].push(u);return Object.values(a)}getPastKeyValues(n,a){const u=Object.create(null);for(const c in n)if(c.startsWith("present")){let p=c.replace("present","past_key_values");a!==null&&c.includes("encoder")?u[p]=a[p]:u[p]=n[c]}return u}addPastKeyValues(n,a,u=!1){if(a)Object.assign(n,a);else if(u){let c=[1,this.num_encoder_heads,0,this.encoder_dim_kv];for(let s=0;s{if(!self.OffscreenCanvas)throw new Error("OffscreenCanvas not supported by this browser.");return new self.OffscreenCanvas(y,n)},loadImageFunction=self.createImageBitmap,ImageDataClass=self.ImageData;else if(fs)loadImageFunction=async y=>{let{data:n,info:a}=await y.raw().toBuffer({resolveWithObject:!0});return new RawImage(new Uint8ClampedArray(n),a.width,a.height,a.channels)};else throw new Error("Unable to load image processing library.");const RESAMPLING_MAPPING={0:"nearest",1:"lanczos",2:"bilinear",3:"bicubic",4:"box",5:"hamming"};class RawImage{constructor(n,a,u,c){this._update(n,a,u,c)}static async read(n){if(n 
instanceof RawImage)return n;if(isString(n)||n instanceof URL)return await this.fromURL(n);throw new Error(`Unsupported input type: ${typeof n}`)}static async fromURL(n){let u=await(await getFile(n)).blob();return this.fromBlob(u)}static async fromBlob(n){if(BROWSER_ENV){let a=await loadImageFunction(n);const u=createCanvasFunction(a.width,a.height).getContext("2d");return u.drawImage(a,0,0),new this(u.getImageData(0,0,a.width,a.height).data,a.width,a.height,4)}else{let a=fs(await n.arrayBuffer());return await loadImageFunction(a)}}grayscale(){if(this.channels===1)return this;let n=new Uint8ClampedArray(this.width*this.height*1);switch(this.channels){case 3:case 4:for(let a=0,u=0;a=0?f=u:o=-u,c>=0?l=c:t=-c,h.drawImage(s,f,l,n,a,o,t,n,a),new RawImage(h.getImageData(0,0,n,a).data,n,a,4).convert(p)}else{let p=fs(this.data,{raw:{width:this.width,height:this.height,channels:this.channels}});if(u>=0&&c>=0)p=p.extract({left:Math.floor(u),top:Math.floor(c),width:n,height:a});else if(u<=0&&c<=0){let s=Math.floor(-c),h=Math.floor(-u);p=p.extend({top:s,left:h,right:n-this.width-h,bottom:a-this.height-s})}else{let s=[0,0],h=0;c<0?(s[0]=Math.floor(-c),s[1]=a-this.height-s[0]):h=Math.floor(c);let f=[0,0],l=0;u<0?(f[0]=Math.floor(-u),f[1]=n-this.width-f[0]):l=Math.floor(u),p=p.extend({top:s[0],bottom:s[1],left:f[0],right:f[1]}).extract({left:l,top:h,width:n,height:a})}return await loadImageFunction(p)}}toCanvas(){let n=this.clone().rgba(),a=createCanvasFunction(n.width,n.height),u=new ImageDataClass(n.data,n.width,n.height);return a.getContext("2d").putImageData(u,0,0),a}_update(n,a,u,c=null){return this.data=n,this.width=a,this.height=u,c!==null&&(this.channels=c),this}clone(){return new RawImage(this.data.slice(),this.width,this.height,this.channels)}convert(n){if(this.channels===n)return this;switch(n){case 1:this.grayscale();break;case 3:this.rgb();break;case 4:this.rgba();break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this}save(n,a="image/png"){if(!env.useFS)throw new Error("Unable to save the image because filesystem is disabled in this environment.");const c=this.toCanvas().toBuffer(a);fs.writeFileSync(n,c)}}async function read_audio(y,n){if(typeof AudioContext>"u")throw Error("Unable to load audio from path/URL since `AudioContext` is not available in your environment. Instead, audio data should be passed directly to the pipeline/processor. 
For more information and some example code, see https://huggingface.co/docs/transformers.js/tutorials/node-audio-processing.");const a=await(await getFile(y)).arrayBuffer(),u=new AudioContext({sampleRate:n});typeof n>"u"&&console.warn(`No sampling rate provided, using default of ${u.sampleRate}Hz.`);const c=await u.decodeAudioData(a);let p;if(c.numberOfChannels===2){const s=Math.sqrt(2);let h=c.getChannelData(0),f=c.getChannelData(1);p=new Float32Array(h.length);for(let l=0;l=i?e[b]=r*Math.exp(d*(_-i)):e[b]=o+t*_,g[b]=p.map(v=>e[b]-v)}const m=e.slice(1).map((b,_)=>1/(b-e[_]));for(let b=0;bthis.preprocess(c)));return a.forEach(c=>c.pixel_values.dims=[1,...c.pixel_values.dims]),{pixel_values:cat(a.map(c=>c.pixel_values)),original_sizes:a.map(c=>c.original_size),reshaped_input_sizes:a.map(c=>c.reshaped_input_size)}}}class ViTFeatureExtractor extends ImageFeatureExtractor{}class MobileViTFeatureExtractor extends ImageFeatureExtractor{}class DetrFeatureExtractor extends ImageFeatureExtractor{async _call(n){let a=await super._call(n),u=[a.pixel_values.dims[0],64,64];return a.pixel_mask=new Tensor("int64",new BigInt64Array(u.reduce((c,p)=>c*p)).fill(1n),u),a}center_to_corners_format([n,a,u,c]){return[n-u/2,a-c/2,n+u/2,a+c/2]}post_process_object_detection(n,a=.5,u=null){const c=n.logits,p=n.pred_boxes,[s,h,f]=c.dims;if(u!==null&&u.length!==s)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let l=[];for(let o=0;oa){let v=i[d].data;v=this.center_to_corners_format(v),t!==null&&(v=v.map((w,S)=>w*t[(S+1)%2])),e.boxes.push(v),e.classes.push(m),e.scores.push(_)}}l.push(e)}return l}remove_low_and_no_objects(n,a,u,c){let p=[],s=[],h=[];for(let f=0;fu&&(p.push(o),s.push(r),h.push(t))}return[p,s,h]}check_segment_validity(n,a,u,c=.5,p=.8){let s=[],h=0,f=0;for(let o=0;o=c&&++f;let l=h>0&&f>0;return l&&(l=h/f>p),[l,s]}compute_segments(n,a,u,c,p,s=null,h=null){let[f,l]=h??n[0].dims,o=new Tensor("int32",new Int32Array(f*l),[f,l]),t=[];if(h!==null)for(let d=0;dr[m]&&(e[m]=d,r[m]=n[d].data[m])}let i=0;for(let d=0;dBigInt(Math.round(f)))),s);return{pixel_values:u,original_sizes:c,reshaped_input_sizes:p,input_points:h}}post_process_masks(n,a,u,{mask_threshold:c=0,binarize:p=!0,pad_size:s=null}={}){let h=[];s=s??this.pad_size;let f=[s.height,s.width];for(let l=0;lb>c),m.dims)),m.dims=[1,...m.dims],r.push(m)}let i=cat(r);h.push(i)}return h}}class WhisperFeatureExtractor extends FeatureExtractor{constructor(n){var a;super(n),(a=this.config).mel_filters??(a.mel_filters=getMelFilters(this.config.sampling_rate,this.config.n_fft,this.config.feature_size))}calcOffset(n,a){return Math.abs((n+a)%(2*a)-a)}padReflect(n,a,u){const c=new Float32Array(n.length+a+u),p=n.length-1;for(let s=0;s>1;++w){const S=(w+1-u)**2/2,A=Math.sqrt(m**2+b**2)**S,O=S*Math.atan2(b,m);let x=2*w;l[x]=A*Math.cos(O),l[x+1]=A*Math.sin(O),o[x]=l[x],o[x+1]=-l[x+1]}const _=l.subarray(c,p),v=new FFT(s>>1);v.transform(r,o);for(let w=0;w>1,N=S[I]*a[I];t[O]=N*_[O],t[x]=N*_[x]}v.transform(i,t);for(let O=0;Oc?s-c:0,l=s>1,l=new Float32Array(h*f);for(let m=0;mthis.config.n_samples&&console.warn("Attempting to extract features for audio longer than 30 seconds. 
If using a pipeline to extract transcript from a long audio clip, remember to specify `chunk_length_s` and/or `stride_length_s`.");let a=n.slice(0,this.config.n_samples),u=this._extract_fbank_features(a);return{input_features:new Tensor("float32",u.data,[1,...u.dims])}}}class Processor extends Callable{constructor(n){super(),this.feature_extractor=n}async _call(n){return await this.feature_extractor(n)}}class SamProcessor extends Processor{async _call(n,a){return await this.feature_extractor(n,a)}post_process_masks(...n){return this.feature_extractor.post_process_masks(...n)}}class WhisperProcessor extends Processor{async _call(n){return await this.feature_extractor(n)}}class AutoProcessor{static async from_pretrained(n,{progress_callback:a=null,config:u=null,cache_dir:c=null,local_files_only:p=!1,revision:s="main"}={}){let h=u??await getModelJSON(n,"preprocessor_config.json",!0,{progress_callback:a,config:u,cache_dir:c,local_files_only:p,revision:s}),f=h.feature_extractor_type??h.image_processor_type,l=this.FEATURE_EXTRACTOR_CLASS_MAPPING[f];if(!l)if(h.size!==void 0)console.warn("Feature extractor type not specified, assuming ImageFeatureExtractor due to size parameter in config."),l=ImageFeatureExtractor;else throw new Error(`Unknown Feature Extractor type: ${h.feature_extractor_type}`);let o=this.PROCESSOR_CLASS_MAPPING[h.processor_class]??Processor,t=new l(h);return new o(t)}}Se(AutoProcessor,"FEATURE_EXTRACTOR_CLASS_MAPPING",{WhisperFeatureExtractor,ViTFeatureExtractor,MobileViTFeatureExtractor,DetrFeatureExtractor,SamImageProcessor}),Se(AutoProcessor,"PROCESSOR_CLASS_MAPPING",{WhisperProcessor,SamProcessor});async function prepareImages(y){return Array.isArray(y)||(y=[y]),y=await Promise.all(y.map(n=>RawImage.read(n))),y}class Pipeline extends Callable{constructor(n,a,u){super(),this.task=n,this.tokenizer=a,this.model=u}async dispose(){await this.model.dispose()}async _call(n){let a=this.tokenizer(n,{padding:!0,truncation:!0}),u=await this.model(a);return[a,u]}}class TextClassificationPipeline extends Pipeline{async _call(n,{topk:a=1}={}){let[u,c]=await super._call(n),p=this.model.config.id2label,s=[];for(let h of c.logits){let l=getTopItems(softmax(h.data),a).map(function(o){return{label:p[o[0]],score:o[1]}});a===1?s.push(...l):s.push(l)}return Array.isArray(n)||a===1?s:s[0]}}class TokenClassificationPipeline extends Pipeline{async _call(n,{ignore_labels:a=["O"]}={}){let u=Array.isArray(n);u||(n=[n]);let c=this.tokenizer,[p,s]=await super._call(n),h=s.logits,f=this.model.config.id2label,l=[];for(let o=0;o[r,i]).filter(r=>r[1]>l),t=Array.from(softmax(p.end_logits[h].data)).map((r,i)=>[r,i]).filter(r=>r[1]>l),e=product(o,t).filter(r=>r[0][1]<=r[1][1]).map(r=>[r[0][1],r[1][1],r[0][0]*r[1][0]]).sort((r,i)=>i[2]-r[2]);for(let r=0;r{let i=[...f];return i[l]=r[0],{score:r[1],token:r[0],token_str:p.model.vocab[r[0]],sequence:p.decode(i,{skip_special_tokens:!0})}}))}return Array.isArray(n)?s:s[0]}}class Text2TextGenerationPipeline extends Pipeline{constructor(){super(...arguments);Se(this,"_key",null)}async _call(a,u={}){Array.isArray(a)||(a=[a]),this.model.config.prefix&&(a=a.map(l=>this.model.config.prefix+l));let c=this.model.config.task_specific_params;c&&c[this.task]&&c[this.task].prefix&&(a=a.map(l=>c[this.task].prefix+l));let p={padding:!0,truncation:!0},s;this instanceof TranslationPipeline&&"_build_translation_inputs"in this.tokenizer?s=this.tokenizer._build_translation_inputs(a,p,u).input_ids:s=this.tokenizer(a,p).input_ids;let h=await 
this.model.generate(s,u),f=this.tokenizer.batch_decode(h,{skip_special_tokens:!0});return this._key!==null&&(f=f.map(l=>this._key===null?l:{[this._key]:l})),f}}class SummarizationPipeline extends Text2TextGenerationPipeline{constructor(){super(...arguments);Se(this,"_key","summary_text")}}class TranslationPipeline extends Text2TextGenerationPipeline{constructor(){super(...arguments);Se(this,"_key","translation_text")}}class TextGenerationPipeline extends Pipeline{async _call(n,a={}){let u=typeof n=="string"||n instanceof String;u&&(n=[n]),this.tokenizer.padding_side="left";let c=this.tokenizer(n,{padding:!0,truncation:!0}),p=c.input_ids,s=c.attention_mask,h=await this.model.generate(p,a,null,{inputs_attention_mask:s});const f=this.tokenizer.batch_decode(h,{skip_special_tokens:!0}),l=Array.from({length:n.length},o=>[]);for(let o=0;o[c.toLowerCase(),p])),this.entailment_id=this.label2id.entailment,this.entailment_id===void 0&&(console.warn("Could not find 'entailment' in label2id mapping. Using 2 as entailment_id."),this.entailment_id=2),this.contradiction_id=this.label2id.contradiction,this.contradiction_id===void 0&&(console.warn("Could not find 'contradiction' in label2id mapping. Using 0 as contradiction_id."),this.contradiction_id=0)}async _call(n,a,{hypothesis_template:u="This example is {}.",multi_label:c=!1}={}){let p=Array.isArray(n);p||(n=[n]),Array.isArray(a)||(a=[a]);let s=a.map(l=>u.replace("{}",l)),h=c||a.length===1,f=[];for(let l of n){let o=[];for(let r of s){let i=this.tokenizer(l,{text_pair:r}),d=await this.model(i);h?o.push([d.logits.data[this.contradiction_id],d.logits.data[this.entailment_id]]):o.push(d.logits.data[this.entailment_id])}let t;h?t=o.map(r=>softmax(r)[1]):t=softmax(o);let e=t.map((r,i)=>[r,i]).sort((r,i)=>i[0]-r[0]);f.push({sequence:l,labels:e.map(r=>a[r[1]]),scores:e.map(r=>r[0])})}return p?f:f[0]}}class FeatureExtractionPipeline extends Pipeline{async _call(n,{pooling:a="none",normalize:u=!1}={}){let[c,p]=await super._call(n),s=p.last_hidden_state??p.logits;if(a!=="none")if(a==="mean")s=mean_pooling(s,c.attention_mask);else throw Error(`Pooling method '${a}' not supported.`);return u&&(s=s.normalize(2,-1)),s}}class AutomaticSpeechRecognitionPipeline extends Pipeline{constructor(n,a,u,c){super(n,a,u),this.processor=c}async _preprocess(n,a){return isString(n)&&(n=await read_audio(n,a)),n}async _call(n,a={}){let u=a.return_timestamps??!1,c=a.chunk_length_s??0,p=a.stride_length_s??null,s=a.chunk_callback??null,h=a.force_full_sequences??!1,f=pop(a,"language",null),l=pop(a,"task",null);if(f||l||u){if(a.forced_decoder_ids)throw new Error("Cannot specify `language`/`task`/`return_timestamps` and `forced_decoder_ids` at the same time.");let i=this.tokenizer.get_decoder_prompt_ids({language:f,task:l,no_timestamps:!u});i.length>0&&(a.forced_decoder_ids=i)}let o=!Array.isArray(n);o&&(n=[n]);const t=this.processor.feature_extractor.config.sampling_rate,e=this.processor.feature_extractor.config.chunk_length/this.model.config.max_source_positions;let r=[];for(let i of n){i=await this._preprocess(i,t);let d=[];if(c>0){if(p===null)p=c/6;else if(c<=p)throw Error("`chunk_length_s` must be larger than `stride_length_s`.");const b=t*c,_=t*p,v=b-2*_;let w=0;for(;w=i.length;d.push({stride:[S.length,O?0:_,x?0:_],input_features:A.input_features,is_last:x}),w+=v}}else d=[{stride:[i.length,0,0],input_features:(await this.processor(i)).input_features,is_last:!0}];for(let b of d){let _=await 
this.model.generate(b.input_features,a);b.tokens=_[0],b.stride=b.stride.map(v=>v/t),s!==null&&s(b)}let[g,m]=this.tokenizer._decode_asr(d,{time_precision:e,return_timestamps:u,force_full_sequences:h});r.push({text:g,...m})}return o?r[0]:r}}class ImageToTextPipeline extends Pipeline{constructor(n,a,u,c){super(n,a,u),this.processor=c}async _call(n,a={}){let u=Array.isArray(n);n=await prepareImages(n);let{pixel_values:c}=await this.processor(n),p=[];for(let s of c){s.dims=[1,...s.dims];let h=await this.model.generate(s,a),f=this.tokenizer.batch_decode(h,{skip_special_tokens:!0}).map(l=>({generated_text:l.trim()}));p.push(f)}return u?p:p[0]}}class ImageClassificationPipeline extends Pipeline{constructor(n,a,u){super(n,null,a),this.processor=u}async _call(n,{topk:a=1}={}){let u=Array.isArray(n);n=await prepareImages(n);let{pixel_values:c}=await this.processor(n),p=await this.model({pixel_values:c}),s=this.model.config.id2label,h=[];for(let f of p.logits){let o=getTopItems(softmax(f.data),a).map(function(t){return{label:s[t[0]],score:t[1]}});a===1?h.push(...o):h.push(o)}return u||a===1?h:h[0]}}class ImageSegmentationPipeline extends Pipeline{constructor(n,a,u){super(n,null,a),this.processor=u,this.subtasks_mapping={panoptic:"post_process_panoptic_segmentation",instance:"post_process_instance_segmentation",semantic:"post_process_semantic_segmentation"}}async _call(n,{threshold:a=.5,mask_threshold:u=.5,overlap_mask_area_threshold:c=.8,label_ids_to_fuse:p=null,target_sizes:s=null,subtask:h=null}={}){if(Array.isArray(n)&&n.length!==1)throw Error("Image segmentation pipeline currently only supports a batch size of 1.");n=await prepareImages(n);let l=n.map(d=>[d.height,d.width]),{pixel_values:o,pixel_mask:t}=await this.processor(n),e=await this.model({pixel_values:o,pixel_mask:t}),r=null;if(h!==null)r=this.subtasks_mapping[h];else for(let[d,g]of Object.entries(this.subtasks_mapping))if(g in this.processor.feature_extractor){r=this.processor.feature_extractor[g].bind(this.processor.feature_extractor),h=d;break}let i=[];if(h==="panoptic"||h==="instance"){let d=r(e,a,u,c,p,s??l)[0],g=d.segmentation,m=this.model.config.id2label;for(let b of d.segments_info){let _=new Uint8ClampedArray(g.data.length);for(let w=0;wu.replace("{}",o)),s=this.tokenizer(p,{padding:!0,truncation:!0}),{pixel_values:h}=await this.processor(n),f=await this.model({...s,pixel_values:h}),l=[];for(let o of f.logits_per_image){let t=softmax(o.data);l.push([...t].map((e,r)=>({score:e,label:a[r]})))}return c?l:l[0]}}class ObjectDetectionPipeline extends Pipeline{constructor(n,a,u){super(n,null,a),this.processor=u}async _call(n,{threshold:a=.9,percentage:u=!1}={}){let c=Array.isArray(n);if(c&&n.length!==1)throw Error("Object detection pipeline currently only supports a batch size of 1.");n=await prepareImages(n);let p=u?null:n.map(t=>[t.height,t.width]),{pixel_values:s,pixel_mask:h}=await this.processor(n),f=await this.model({pixel_values:s,pixel_mask:h}),l=this.processor.feature_extractor.post_process_object_detection(f,a,p),o=this.model.config.id2label;return l.forEach(t=>t.labels=t.classes.map(e=>o[e])),c?l:l[0]}}const 
SUPPORTED_TASKS={"text-classification":{tokenizer:AutoTokenizer,pipeline:TextClassificationPipeline,model:AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-finetuned-sst-2-english"},type:"text"},"token-classification":{tokenizer:AutoTokenizer,pipeline:TokenClassificationPipeline,model:AutoModelForTokenClassification,default:{model:"Xenova/bert-base-multilingual-cased-ner-hrl"},type:"text"},"question-answering":{tokenizer:AutoTokenizer,pipeline:QuestionAnsweringPipeline,model:AutoModelForQuestionAnswering,default:{model:"Xenova/distilbert-base-cased-distilled-squad"},type:"text"},"fill-mask":{tokenizer:AutoTokenizer,pipeline:FillMaskPipeline,model:AutoModelForMaskedLM,default:{model:"Xenova/bert-base-uncased"},type:"text"},summarization:{tokenizer:AutoTokenizer,pipeline:SummarizationPipeline,model:AutoModelForSeq2SeqLM,default:{model:"Xenova/distilbart-cnn-6-6"},type:"text"},translation:{tokenizer:AutoTokenizer,pipeline:TranslationPipeline,model:AutoModelForSeq2SeqLM,default:{model:"Xenova/t5-small"},type:"text"},"text2text-generation":{tokenizer:AutoTokenizer,pipeline:Text2TextGenerationPipeline,model:AutoModelForSeq2SeqLM,default:{model:"Xenova/flan-t5-small"},type:"text"},"text-generation":{tokenizer:AutoTokenizer,pipeline:TextGenerationPipeline,model:AutoModelForCausalLM,default:{model:"Xenova/gpt2"},type:"text"},"zero-shot-classification":{tokenizer:AutoTokenizer,pipeline:ZeroShotClassificationPipeline,model:AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-mnli"},type:"text"},"automatic-speech-recognition":{tokenizer:AutoTokenizer,pipeline:AutomaticSpeechRecognitionPipeline,model:AutoModelForSeq2SeqLM,processor:AutoProcessor,default:{model:"Xenova/whisper-tiny.en"},type:"multimodal"},"image-to-text":{tokenizer:AutoTokenizer,pipeline:ImageToTextPipeline,model:AutoModelForVision2Seq,processor:AutoProcessor,default:{model:"Xenova/vit-gpt2-image-captioning"},type:"multimodal"},"image-classification":{pipeline:ImageClassificationPipeline,model:AutoModelForImageClassification,processor:AutoProcessor,default:{model:"Xenova/vit-base-patch16-224"},type:"multimodal"},"image-segmentation":{pipeline:ImageSegmentationPipeline,model:AutoModelForImageSegmentation,processor:AutoProcessor,default:{model:"Xenova/detr-resnet-50-panoptic"},type:"multimodal"},"zero-shot-image-classification":{tokenizer:AutoTokenizer,pipeline:ZeroShotImageClassificationPipeline,model:AutoModel,processor:AutoProcessor,default:{model:"Xenova/clip-vit-base-patch32"},type:"multimodal"},"object-detection":{pipeline:ObjectDetectionPipeline,model:AutoModelForObjectDetection,processor:AutoProcessor,default:{model:"Xenova/detr-resnet-50"},type:"multimodal"},"feature-extraction":{tokenizer:AutoTokenizer,pipeline:FeatureExtractionPipeline,model:AutoModel,default:{model:"Xenova/all-MiniLM-L6-v2"},type:"text"}},TASK_ALIASES={"sentiment-analysis":"text-classification",ner:"token-classification",vqa:"visual-question-answering",asr:"automatic-speech-recognition",embeddings:"feature-extraction"};async function pipeline(y,n=null,{quantized:a=!0,progress_callback:u=null,config:c=null,cache_dir:p=null,local_files_only:s=!1,revision:h="main"}={}){y=TASK_ALIASES[y]??y;let f=SUPPORTED_TASKS[y.split("_",1)[0]];if(!f)throw Error(`Unsupported pipeline: ${y}. Must be one of [${Object.keys(SUPPORTED_TASKS)}]`);n||(n=f.default.model,console.log(`No model specified. 
Using default model: "${n}".`));let l=f.tokenizer,o=f.model,t=f.pipeline,e=f.processor,r=[],i={quantized:a,progress_callback:u,config:c,cache_dir:p,local_files_only:s,revision:h};l&&r.push(l.from_pretrained(n,i)),o&&r.push(o.from_pretrained(n,i)),e&&r.push(e.from_pretrained(n,i));let d=await Promise.all(r);return dispatchCallback(u,{status:"ready",task:y,model:n}),new t(y,...d)}function product(...y){return y.reduce((n,a)=>n.flatMap(u=>a.map(c=>[u,c])))}env.allowLocalModels=!1;class Singleton{constructor(n,a,u){this.tokenizer=n,this.model=a,this.quantized=u}static async getInstance(n=null){return this.instance===null&&(this.instance=pipeline(this.task,this.model,{quantized:this.quantized,progress_callback:n})),this.instance}}Se(Singleton,"task",null),Se(Singleton,"model",null),Se(Singleton,"quantized",null),Se(Singleton,"instance",null),self.addEventListener("message",async y=>{const n=y.data;if(n.action==="load"){await ImageClassificationPipelineSingleton.getInstance(),self.postMessage({status:"ready"});return}const a=new Uint8ClampedArray(n.image.data.length/4);for(let p=0;pawait(await ImageClassificationPipelineSingleton.getInstance())(y,{topk:0}).catch(u=>(self.postMessage({status:"error",task:"image-classification",data:u}),null))})(); diff --git a/spaces/XzJosh/ShanBao-Bert-VITS2/monotonic_align/__init__.py b/spaces/XzJosh/ShanBao-Bert-VITS2/monotonic_align/__init__.py deleted file mode 100644 index 75603d26cf2b8d6196f5a68a89f9e49d8e519bc8..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/ShanBao-Bert-VITS2/monotonic_align/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - -def maximum_path(neg_cent, mask): - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/XzJosh/Spade-Bert-VITS2/app.py b/spaces/XzJosh/Spade-Bert-VITS2/app.py deleted file mode 100644 index 70373828bb9480d692bbe23696bc968b2dfba10a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Spade-Bert-VITS2/app.py +++ /dev/null @@ -1,160 +0,0 @@ -import sys, os - -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s") - -logger = logging.getLogger(__name__) - -import torch -import argparse -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -import gradio as gr -import webbrowser - - -net_g = None - - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in 
range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - del word2ph - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid): - global net_g - bert, phones, tones, lang_ids = get_text(text, "ZH", hps) - with torch.no_grad(): - x_tst=phones.to(device).unsqueeze(0) - tones=tones.to(device).unsqueeze(0) - lang_ids=lang_ids.to(device).unsqueeze(0) - bert = bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device) - del phones - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers - return audio - -def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale): - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker) - return "Success", (hps.data.sampling_rate, audio) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--model_dir", default="./logs/Echo/G_2200.pth", help="path of your model") - parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file") - parser.add_argument("--share", default=False, help="make link public") - parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log") - - args = parser.parse_args() - if args.debug: - logger.info("Enable DEBUG-LEVEL log") - logging.basicConfig(level=logging.DEBUG) - hps = utils.get_hparams_from_file(args.config_dir) - device = "cuda:0" if torch.cuda.is_available() else "cpu" - ''' - device = ( - "cuda:0" - if torch.cuda.is_available() - else ( - "mps" - if sys.platform == "darwin" and torch.backends.mps.is_available() - else "cpu" - ) - ) - ''' - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(device) - _ = net_g.eval() - - _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True) - - speaker_ids = hps.data.spk2id - speakers = list(speaker_ids.keys()) - with gr.Blocks() as app: - with gr.Row(): - with gr.Column(): - gr.Markdown(value=""" - 【AI黑桃影②】在线语音合成(Bert-Vits2)\n - 作者:Xz乔希 https://space.bilibili.com/5859321\n - 声音归属:黑桃影 https://space.bilibili.com/456368455\n - 【AI黑桃影①】https://huggingface.co/spaces/XzJosh/Echo-Bert-VITS2\n - Bert-VITS2项目:https://github.com/Stardust-minus/Bert-VITS2\n - 使用本模型请严格遵守法律法规!\n - 发布二创作品请标注本项目作者及链接、作品使用Bert-VITS2 AI生成!\n - """) - text = gr.TextArea(label="Text", placeholder="Input Text Here", - value="星空下的白色幻影,怪盗斯倍的埃叩参上!") - speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker') - sdp_ratio = gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.01, label='SDP/DP混合比') - noise_scale = gr.Slider(minimum=0.1, maximum=1, value=0.5, step=0.01, label='感情调节') - noise_scale_w = gr.Slider(minimum=0.1, maximum=1, value=0.9, step=0.01, label='音素长度') - length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='生成长度') - btn = 
gr.Button("点击生成", variant="primary") - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio") - gr.Markdown(value=""" - 【AI塔菲】https://huggingface.co/spaces/XzJosh/Taffy-Bert-VITS2\n - 【AI东雪莲】https://huggingface.co/spaces/XzJosh/Azuma-Bert-VITS2\n - 【AI奶绿】https://huggingface.co/spaces/XzJosh/LAPLACE-Bert-VITS2\n - 【AI七海】https://huggingface.co/spaces/XzJosh/Nana7mi-Bert-VITS2\n - 【AI阿梓】https://huggingface.co/spaces/XzJosh/Azusa-Bert-VITS2\n - 【AI嘉然】https://huggingface.co/spaces/XzJosh/Diana-Bert-VITS2\n - 【AI向晚】https://huggingface.co/spaces/XzJosh/Ava-Bert-VITS2\n - 【AI乃琳】https://huggingface.co/spaces/XzJosh/Eileen-Bert-VITS2\n - 【AI贝拉】https://huggingface.co/spaces/XzJosh/Bella-Bert-VITS2\n - 【AI珈乐】https://huggingface.co/spaces/XzJosh/Carol-Bert-VITS2\n - 【AI星瞳】https://huggingface.co/spaces/XzJosh/XingTong-Bert-VITS2\n - 【AI尼奈】https://huggingface.co/spaces/XzJosh/nine1-Bert-VITS2\n - 【AI扇宝】https://huggingface.co/spaces/XzJosh/ShanBao-Bert-VITS2\n - 【AI剑魔】https://huggingface.co/spaces/XzJosh/Aatrox-Bert-VITS2\n - 【AI电棍】https://huggingface.co/spaces/XzJosh/otto-Bert-VITS2\n - 【AI恬豆】https://huggingface.co/spaces/XzJosh/Bekki-Bert-VITS2\n - """) - btn.click(tts_fn, - inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale], - outputs=[text_output, audio_output]) - -# webbrowser.open("http://127.0.0.1:6006") -# app.launch(server_port=6006, show_error=True) - - app.launch(show_error=True) diff --git a/spaces/YUANAI/DiffspeechResearch/mfa_usr/adapt.py b/spaces/YUANAI/DiffspeechResearch/mfa_usr/adapt.py deleted file mode 100644 index d1f509b9af8cf53d2b8fc910ac1eb41f441b8054..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/mfa_usr/adapt.py +++ /dev/null @@ -1,201 +0,0 @@ -import shutil -import os -import time -from montreal_forced_aligner import __version__ -from montreal_forced_aligner.corpus.align_corpus import AlignableCorpus -from montreal_forced_aligner.dictionary import Dictionary, MultispeakerDictionary -from montreal_forced_aligner.aligner import TrainableAligner, PretrainedAligner -from montreal_forced_aligner.models import AcousticModel -from montreal_forced_aligner.config import TEMP_DIR, align_yaml_to_config, load_basic_align, load_command_configuration, \ - train_yaml_to_config -from montreal_forced_aligner.utils import get_available_acoustic_languages, get_pretrained_acoustic_path, \ - get_available_dict_languages, validate_dictionary_arg -from montreal_forced_aligner.helper import setup_logger, log_config -from montreal_forced_aligner.exceptions import ArgumentError - - -def load_adapt_config(): - training_config, align_config = train_yaml_to_config('mfa_usr/adapt_config.yaml', require_mono=False) - training_config.training_configs[0].fmllr_iterations = list( - range(0, training_config.training_configs[0].num_iterations)) - training_config.training_configs[0].realignment_iterations = list(range(0, training_config.training_configs[ - 0].num_iterations)) - return training_config, align_config - - -class AcousticModel2(AcousticModel): - def adaptation_config(self): - train, align = load_adapt_config() - return train - - -def adapt_model(args, unknown_args=None): - command = 'align' - all_begin = time.time() - if not args.temp_directory: - temp_dir = TEMP_DIR - else: - temp_dir = os.path.expanduser(args.temp_directory) - corpus_name = os.path.basename(args.corpus_directory) - if corpus_name == '': - args.corpus_directory = os.path.dirname(args.corpus_directory) - corpus_name = 
os.path.basename(args.corpus_directory) - data_directory = os.path.join(temp_dir, corpus_name) - if args.config_path: - align_config = align_yaml_to_config(args.config_path) - else: - align_config = load_basic_align() - align_config.use_mp = not args.disable_mp - align_config.debug = args.debug - align_config.overwrite = args.overwrite - align_config.cleanup_textgrids = not args.disable_textgrid_cleanup - - if unknown_args: - align_config.update_from_args(unknown_args) - conf_path = os.path.join(data_directory, 'config.yml') - if getattr(args, 'clean', False) and os.path.exists(data_directory): - print('Cleaning old directory!') - shutil.rmtree(data_directory, ignore_errors=True) - if getattr(args, 'verbose', False): - log_level = 'debug' - else: - log_level = 'info' - logger = setup_logger(command, data_directory, console_level=log_level) - logger.debug('ALIGN CONFIG:') - log_config(logger, align_config) - conf = load_command_configuration(conf_path, {'dirty': False, - 'begin': all_begin, - 'version': __version__, - 'type': command, - 'corpus_directory': args.corpus_directory, - 'dictionary_path': args.dictionary_path, - 'acoustic_model_path': args.acoustic_model_path}) - if conf['dirty'] or conf['type'] != command \ - or conf['corpus_directory'] != args.corpus_directory \ - or conf['version'] != __version__ \ - or conf['dictionary_path'] != args.dictionary_path: - logger.warning( - 'WARNING: Using old temp directory, this might not be ideal for you, use the --clean flag to ensure no ' - 'weird behavior for previous versions of the temporary directory.') - if conf['dirty']: - logger.debug('Previous run ended in an error (maybe ctrl-c?)') - if conf['type'] != command: - logger.debug('Previous run was a different subcommand than {} (was {})'.format(command, conf['type'])) - if conf['corpus_directory'] != args.corpus_directory: - logger.debug('Previous run used source directory ' - 'path {} (new run: {})'.format(conf['corpus_directory'], args.corpus_directory)) - if conf['version'] != __version__: - logger.debug('Previous run was on {} version (new run: {})'.format(conf['version'], __version__)) - if conf['dictionary_path'] != args.dictionary_path: - logger.debug('Previous run used dictionary path {} ' - '(new run: {})'.format(conf['dictionary_path'], args.dictionary_path)) - if conf['acoustic_model_path'] != args.acoustic_model_path: - logger.debug('Previous run used acoustic model path {} ' - '(new run: {})'.format(conf['acoustic_model_path'], args.acoustic_model_path)) - - os.makedirs(data_directory, exist_ok=True) - model_directory = os.path.join(data_directory, 'acoustic_models') - os.makedirs(model_directory, exist_ok=True) - acoustic_model = AcousticModel2(args.acoustic_model_path, root_directory=model_directory) - print("| acoustic_model.meta", acoustic_model.meta) - acoustic_model.log_details(logger) - training_config = acoustic_model.adaptation_config() - training_config.training_configs[0].update({'beam': align_config.beam, 'retry_beam': align_config.retry_beam}) - training_config.update_from_align(align_config) - logger.debug('ADAPT TRAINING CONFIG:') - log_config(logger, training_config) - audio_dir = None - if args.audio_directory: - audio_dir = args.audio_directory - try: - corpus = AlignableCorpus(args.corpus_directory, data_directory, - speaker_characters=args.speaker_characters, - num_jobs=args.num_jobs, sample_rate=align_config.feature_config.sample_frequency, - logger=logger, use_mp=align_config.use_mp, punctuation=align_config.punctuation, - 
clitic_markers=align_config.clitic_markers, audio_directory=audio_dir) - if corpus.issues_check: - logger.warning('Some issues parsing the corpus were detected. ' - 'Please run the validator to get more information.') - logger.info(corpus.speaker_utterance_info()) - if args.dictionary_path.lower().endswith('.yaml'): - dictionary = MultispeakerDictionary(args.dictionary_path, data_directory, logger=logger, - punctuation=align_config.punctuation, - clitic_markers=align_config.clitic_markers, - compound_markers=align_config.compound_markers, - multilingual_ipa=acoustic_model.meta['multilingual_ipa'], - strip_diacritics=acoustic_model.meta.get('strip_diacritics', None), - digraphs=acoustic_model.meta.get('digraphs', None)) - else: - dictionary = Dictionary(args.dictionary_path, data_directory, logger=logger, - punctuation=align_config.punctuation, - clitic_markers=align_config.clitic_markers, - compound_markers=align_config.compound_markers, - multilingual_ipa=acoustic_model.meta['multilingual_ipa'], - strip_diacritics=acoustic_model.meta.get('strip_diacritics', None), - digraphs=acoustic_model.meta.get('digraphs', None)) - acoustic_model.validate(dictionary) - - begin = time.time() - previous = PretrainedAligner(corpus, dictionary, acoustic_model, align_config, - temp_directory=data_directory, - debug=getattr(args, 'debug', False), logger=logger) - a = TrainableAligner(corpus, dictionary, training_config, align_config, - temp_directory=data_directory, - debug=getattr(args, 'debug', False), logger=logger, pretrained_aligner=previous) - logger.debug('Setup adapter in {} seconds'.format(time.time() - begin)) - a.verbose = args.verbose - - begin = time.time() - a.train() - logger.debug('Performed adaptation in {} seconds'.format(time.time() - begin)) - - begin = time.time() - a.save(args.output_model_path, root_directory=model_directory) - a.export_textgrids(args.output_directory) - logger.debug('Exported TextGrids in {} seconds'.format(time.time() - begin)) - logger.info('All done!') - - except Exception as _: - conf['dirty'] = True - raise - finally: - handlers = logger.handlers[:] - for handler in handlers: - handler.close() - logger.removeHandler(handler) - conf.save(conf_path) - - -def validate_args(args, downloaded_acoustic_models, download_dictionaries): - if not os.path.exists(args.corpus_directory): - raise ArgumentError('Could not find the corpus directory {}.'.format(args.corpus_directory)) - if not os.path.isdir(args.corpus_directory): - raise ArgumentError('The specified corpus directory ({}) is not a directory.'.format(args.corpus_directory)) - - args.dictionary_path = validate_dictionary_arg(args.dictionary_path, download_dictionaries) - - if args.acoustic_model_path.lower() in downloaded_acoustic_models: - args.acoustic_model_path = get_pretrained_acoustic_path(args.acoustic_model_path.lower()) - elif args.acoustic_model_path.lower().endswith(AcousticModel.extension): - if not os.path.exists(args.acoustic_model_path): - raise ArgumentError('The specified model path does not exist: ' + args.acoustic_model_path) - else: - raise ArgumentError( - 'The language \'{}\' is not currently included in the distribution, ' - 'please align via training or specify one of the following language names: {}.'.format( - args.acoustic_model_path.lower(), ', '.join(downloaded_acoustic_models))) - - -def run_adapt_model(args, unknown_args=None, downloaded_acoustic_models=None, download_dictionaries=None): - if downloaded_acoustic_models is None: - downloaded_acoustic_models = 
get_available_acoustic_languages() - if download_dictionaries is None: - download_dictionaries = get_available_dict_languages() - try: - args.speaker_characters = int(args.speaker_characters) - except ValueError: - pass - args.corpus_directory = args.corpus_directory.rstrip('/').rstrip('\\') - - validate_args(args, downloaded_acoustic_models, download_dictionaries) - adapt_model(args, unknown_args) diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/configuration_utils.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/configuration_utils.py deleted file mode 100644 index ecf23010c3c15f0fd7608888cb22f19e0045daf4..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/configuration_utils.py +++ /dev/null @@ -1,613 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" ConfigMixin base class and utilities.""" -import dataclasses -import functools -import importlib -import inspect -import json -import os -import re -from collections import OrderedDict -from typing import Any, Dict, Tuple, Union - -import numpy as np - -from huggingface_hub import hf_hub_download -from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError -from requests import HTTPError - -from . import __version__ -from .utils import DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, DummyObject, deprecate, logging - - -logger = logging.get_logger(__name__) - -_re_configuration_file = re.compile(r"config\.(.*)\.json") - - -class FrozenDict(OrderedDict): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - for key, value in self.items(): - setattr(self, key, value) - - self.__frozen = True - - def __delitem__(self, *args, **kwargs): - raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") - - def setdefault(self, *args, **kwargs): - raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") - - def pop(self, *args, **kwargs): - raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") - - def update(self, *args, **kwargs): - raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") - - def __setattr__(self, name, value): - if hasattr(self, "__frozen") and self.__frozen: - raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.") - super().__setattr__(name, value) - - def __setitem__(self, name, value): - if hasattr(self, "__frozen") and self.__frozen: - raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.") - super().__setitem__(name, value) - - -class ConfigMixin: - r""" - Base class for all configuration classes. 
Stores all configuration parameters under `self.config` Also handles all - methods for loading/downloading/saving classes inheriting from [`ConfigMixin`] with - - [`~ConfigMixin.from_config`] - - [`~ConfigMixin.save_config`] - - Class attributes: - - **config_name** (`str`) -- A filename under which the config should stored when calling - [`~ConfigMixin.save_config`] (should be overridden by parent class). - - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be - overridden by subclass). - - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass). - - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the init function - should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by - subclass). - """ - config_name = None - ignore_for_config = [] - has_compatibles = False - - _deprecated_kwargs = [] - - def register_to_config(self, **kwargs): - if self.config_name is None: - raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`") - # Special case for `kwargs` used in deprecation warning added to schedulers - # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument, - # or solve in a more general way. - kwargs.pop("kwargs", None) - for key, value in kwargs.items(): - try: - setattr(self, key, value) - except AttributeError as err: - logger.error(f"Can't set {key} with value {value} for {self}") - raise err - - if not hasattr(self, "_internal_dict"): - internal_dict = kwargs - else: - previous_dict = dict(self._internal_dict) - internal_dict = {**self._internal_dict, **kwargs} - logger.debug(f"Updating config from {previous_dict} to {internal_dict}") - - self._internal_dict = FrozenDict(internal_dict) - - def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): - """ - Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the - [`~ConfigMixin.from_config`] class method. - - Args: - save_directory (`str` or `os.PathLike`): - Directory where the configuration JSON file will be saved (will be created if it does not exist). - """ - if os.path.isfile(save_directory): - raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") - - os.makedirs(save_directory, exist_ok=True) - - # If we save using the predefined names, we can load using `from_config` - output_config_file = os.path.join(save_directory, self.config_name) - - self.to_json_file(output_config_file) - logger.info(f"Configuration saved in {output_config_file}") - - @classmethod - def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs): - r""" - Instantiate a Python class from a config dictionary - - Parameters: - config (`Dict[str, Any]`): - A config dictionary from which the Python class will be instantiated. Make sure to only load - configuration files of compatible classes. - return_unused_kwargs (`bool`, *optional*, defaults to `False`): - Whether kwargs that are not consumed by the Python class should be returned or not. - - kwargs (remaining dictionary of keyword arguments, *optional*): - Can be used to update the configuration object (after it being loaded) and initiate the Python class. 
- `**kwargs` will be directly passed to the underlying scheduler/model's `__init__` method and eventually - overwrite same named arguments of `config`. - - Examples: - - ```python - >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler - - >>> # Download scheduler from huggingface.co and cache. - >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32") - - >>> # Instantiate DDIM scheduler class with same config as DDPM - >>> scheduler = DDIMScheduler.from_config(scheduler.config) - - >>> # Instantiate PNDM scheduler class with same config as DDPM - >>> scheduler = PNDMScheduler.from_config(scheduler.config) - ``` - """ - # <===== TO BE REMOVED WITH DEPRECATION - # TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated - if "pretrained_model_name_or_path" in kwargs: - config = kwargs.pop("pretrained_model_name_or_path") - - if config is None: - raise ValueError("Please make sure to provide a config as the first positional argument.") - # ======> - - if not isinstance(config, dict): - deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`." - if "Scheduler" in cls.__name__: - deprecation_message += ( - f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead." - " Otherwise, please make sure to pass a configuration dictionary instead. This functionality will" - " be removed in v1.0.0." - ) - elif "Model" in cls.__name__: - deprecation_message += ( - f"If you were trying to load a model, please use {cls}.load_config(...) followed by" - f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary" - " instead. This functionality will be removed in v1.0.0." - ) - deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False) - config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs) - - init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs) - - # Allow dtype to be specified on initialization - if "dtype" in unused_kwargs: - init_dict["dtype"] = unused_kwargs.pop("dtype") - - # add possible deprecated kwargs - for deprecated_kwarg in cls._deprecated_kwargs: - if deprecated_kwarg in unused_kwargs: - init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg) - - # Return model and optionally state and/or unused_kwargs - model = cls(**init_dict) - - # make sure to also save config parameters that might be used for compatible classes - model.register_to_config(**hidden_dict) - - # add hidden kwargs of compatible classes to unused_kwargs - unused_kwargs = {**unused_kwargs, **hidden_dict} - - if return_unused_kwargs: - return (model, unused_kwargs) - else: - return model - - @classmethod - def get_config_dict(cls, *args, **kwargs): - deprecation_message = ( - f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be" - " removed in version v1.0.0" - ) - deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False) - return cls.load_config(*args, **kwargs) - - @classmethod - def load_config( - cls, pretrained_model_name_or_path: Union[str, os.PathLike], return_unused_kwargs=False, **kwargs - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - r""" - Instantiate a Python class from a config dictionary - - Parameters: - pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): - Can be either: - - - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an - organization name, like `google/ddpm-celebahq-256`. - - A path to a *directory* containing model weights saved using [`~ConfigMixin.save_config`], e.g., - `./my_model_directory/`. - - cache_dir (`Union[str, os.PathLike]`, *optional*): - Path to a directory in which a downloaded pretrained model configuration should be cached if the - standard cache should not be used. - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force the (re-)download of the model weights and configuration files, overriding the - cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received files. Will attempt to resume the download if such a - file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. - output_loading_info(`bool`, *optional*, defaults to `False`): - Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. - local_files_only(`bool`, *optional*, defaults to `False`): - Whether or not to only look at local files (i.e., do not try to download the model). - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. - subfolder (`str`, *optional*, defaults to `""`): - In case the relevant files are located inside a subfolder of the model repo (either remote in - huggingface.co or downloaded locally), you can specify the folder name here. - - - - It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated - models](https://huggingface.co/docs/hub/models-gated#gated-models). - - - - - - Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to - use this method in a firewalled environment. 
- - - """ - cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) - force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - use_auth_token = kwargs.pop("use_auth_token", None) - local_files_only = kwargs.pop("local_files_only", False) - revision = kwargs.pop("revision", None) - _ = kwargs.pop("mirror", None) - subfolder = kwargs.pop("subfolder", None) - - user_agent = {"file_type": "config"} - - pretrained_model_name_or_path = str(pretrained_model_name_or_path) - - if cls.config_name is None: - raise ValueError( - "`self.config_name` is not defined. Note that one should not load a config from " - "`ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`" - ) - - if os.path.isfile(pretrained_model_name_or_path): - config_file = pretrained_model_name_or_path - elif os.path.isdir(pretrained_model_name_or_path): - if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)): - # Load from a PyTorch checkpoint - config_file = os.path.join(pretrained_model_name_or_path, cls.config_name) - elif subfolder is not None and os.path.isfile( - os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) - ): - config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) - else: - raise EnvironmentError( - f"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}." - ) - else: - try: - # Load from URL or cache if already cached - config_file = hf_hub_download( - pretrained_model_name_or_path, - filename=cls.config_name, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - subfolder=subfolder, - revision=revision, - ) - - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier" - " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a" - " token having permission to this repo with `use_auth_token` or log in with `huggingface-cli" - " login`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for" - " this model name. Check the model page at" - f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." - ) - except EntryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}." - ) - except HTTPError as err: - raise EnvironmentError( - "There was a specific connection error when trying to load" - f" {pretrained_model_name_or_path}:\n{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" - f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" - f" directory containing a {cls.config_name} file.\nCheckout your internet connection or see how to" - " run the library in offline mode at" - " 'https://huggingface.co/docs/diffusers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load config for '{pretrained_model_name_or_path}'. 
If you were trying to load it from " - "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a {cls.config_name} file" - ) - - try: - # Load config dict - config_dict = cls._dict_from_json_file(config_file) - except (json.JSONDecodeError, UnicodeDecodeError): - raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.") - - if return_unused_kwargs: - return config_dict, kwargs - - return config_dict - - @staticmethod - def _get_init_keys(cls): - return set(dict(inspect.signature(cls.__init__).parameters).keys()) - - @classmethod - def extract_init_dict(cls, config_dict, **kwargs): - # 0. Copy origin config dict - original_dict = {k: v for k, v in config_dict.items()} - - # 1. Retrieve expected config attributes from __init__ signature - expected_keys = cls._get_init_keys(cls) - expected_keys.remove("self") - # remove general kwargs if present in dict - if "kwargs" in expected_keys: - expected_keys.remove("kwargs") - # remove flax internal keys - if hasattr(cls, "_flax_internal_args"): - for arg in cls._flax_internal_args: - expected_keys.remove(arg) - - # 2. Remove attributes that cannot be expected from expected config attributes - # remove keys to be ignored - if len(cls.ignore_for_config) > 0: - expected_keys = expected_keys - set(cls.ignore_for_config) - - # load diffusers library to import compatible and original scheduler - diffusers_library = importlib.import_module(__name__.split(".")[0]) - - if cls.has_compatibles: - compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)] - else: - compatible_classes = [] - - expected_keys_comp_cls = set() - for c in compatible_classes: - expected_keys_c = cls._get_init_keys(c) - expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c) - expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls) - config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls} - - # remove attributes from orig class that cannot be expected - orig_cls_name = config_dict.pop("_class_name", cls.__name__) - if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name): - orig_cls = getattr(diffusers_library, orig_cls_name) - unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys - config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig} - - # remove private attributes - config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")} - - # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments - init_dict = {} - for key in expected_keys: - # if config param is passed to kwarg and is present in config dict - # it should overwrite existing config dict key - if key in kwargs and key in config_dict: - config_dict[key] = kwargs.pop(key) - - if key in kwargs: - # overwrite key - init_dict[key] = kwargs.pop(key) - elif key in config_dict: - # use value from config dict - init_dict[key] = config_dict.pop(key) - - # 4. Give nice warning if unexpected values have been passed - if len(config_dict) > 0: - logger.warning( - f"The config attributes {config_dict} were passed to {cls.__name__}, " - "but are not expected and will be ignored. Please verify your " - f"{cls.config_name} configuration file." - ) - - # 5. 
Give nice info if config attributes are initiliazed to default because they have not been passed - passed_keys = set(init_dict.keys()) - if len(expected_keys - passed_keys) > 0: - logger.info( - f"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values." - ) - - # 6. Define unused keyword arguments - unused_kwargs = {**config_dict, **kwargs} - - # 7. Define "hidden" config parameters that were saved for compatible classes - hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict} - - return init_dict, unused_kwargs, hidden_config_dict - - @classmethod - def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): - with open(json_file, "r", encoding="utf-8") as reader: - text = reader.read() - return json.loads(text) - - def __repr__(self): - return f"{self.__class__.__name__} {self.to_json_string()}" - - @property - def config(self) -> Dict[str, Any]: - """ - Returns the config of the class as a frozen dictionary - - Returns: - `Dict[str, Any]`: Config of the class. - """ - return self._internal_dict - - def to_json_string(self) -> str: - """ - Serializes this instance to a JSON string. - - Returns: - `str`: String containing all the attributes that make up this configuration instance in JSON format. - """ - config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {} - config_dict["_class_name"] = self.__class__.__name__ - config_dict["_diffusers_version"] = __version__ - - def to_json_saveable(value): - if isinstance(value, np.ndarray): - value = value.tolist() - return value - - config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()} - return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" - - def to_json_file(self, json_file_path: Union[str, os.PathLike]): - """ - Save this instance to a JSON file. - - Args: - json_file_path (`str` or `os.PathLike`): - Path to the JSON file in which this configuration instance's parameters will be saved. - """ - with open(json_file_path, "w", encoding="utf-8") as writer: - writer.write(self.to_json_string()) - - -def register_to_config(init): - r""" - Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are - automatically sent to `self.register_for_config`. To ignore a specific argument accepted by the init but that - shouldn't be registered in the config, use the `ignore_for_config` class variable - - Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init! - """ - - @functools.wraps(init) - def inner_init(self, *args, **kwargs): - # Ignore private kwargs in the init. - init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")} - config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")} - if not isinstance(self, ConfigMixin): - raise RuntimeError( - f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " - "not inherit from `ConfigMixin`." 
- ) - - ignore = getattr(self, "ignore_for_config", []) - # Get positional arguments aligned with kwargs - new_kwargs = {} - signature = inspect.signature(init) - parameters = { - name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore - } - for arg, name in zip(args, parameters.keys()): - new_kwargs[name] = arg - - # Then add all kwargs - new_kwargs.update( - { - k: init_kwargs.get(k, default) - for k, default in parameters.items() - if k not in ignore and k not in new_kwargs - } - ) - new_kwargs = {**config_init_kwargs, **new_kwargs} - getattr(self, "register_to_config")(**new_kwargs) - init(self, *args, **init_kwargs) - - return inner_init - - -def flax_register_to_config(cls): - original_init = cls.__init__ - - @functools.wraps(original_init) - def init(self, *args, **kwargs): - if not isinstance(self, ConfigMixin): - raise RuntimeError( - f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " - "not inherit from `ConfigMixin`." - ) - - # Ignore private kwargs in the init. Retrieve all passed attributes - init_kwargs = {k: v for k, v in kwargs.items()} - - # Retrieve default values - fields = dataclasses.fields(self) - default_kwargs = {} - for field in fields: - # ignore flax specific attributes - if field.name in self._flax_internal_args: - continue - if type(field.default) == dataclasses._MISSING_TYPE: - default_kwargs[field.name] = None - else: - default_kwargs[field.name] = getattr(self, field.name) - - # Make sure init_kwargs override default kwargs - new_kwargs = {**default_kwargs, **init_kwargs} - # dtype should be part of `init_kwargs`, but not `new_kwargs` - if "dtype" in new_kwargs: - new_kwargs.pop("dtype") - - # Get positional arguments aligned with kwargs - for i, arg in enumerate(args): - name = fields[i].name - new_kwargs[name] = arg - - getattr(self, "register_to_config")(**new_kwargs) - original_init(self, *args, **kwargs) - - cls.__init__ = init - return cls diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_pndm.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_pndm.py deleted file mode 100644 index a29f7d6d44cc628ac64bcb7225c5c494d4c70131..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_pndm.py +++ /dev/null @@ -1,425 +0,0 @@ -# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim - -import math -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS -from .scheduling_utils import SchedulerMixin, SchedulerOutput - - -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. - - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class PNDMScheduler(SchedulerMixin, ConfigMixin): - """ - Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques, - namely Runge-Kutta method and a linear multi-step method. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - For more details, see the original paper: https://arxiv.org/abs/2202.09778 - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - skip_prk_steps (`bool`): - allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required - before plms steps; defaults to `False`. - set_alpha_to_one (`bool`, default `False`): - each diffusion step uses the value of alphas product at that step and at the previous one. For the final - step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, - otherwise it uses the value of alpha at step 0. - prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - steps_offset (`int`, default `0`): - an offset added to the inference steps. 
You can use a combination of `offset=1` and - `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in - stable diffusion. - - """ - - _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy() - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - skip_prk_steps: bool = False, - set_alpha_to_one: bool = False, - prediction_type: str = "epsilon", - steps_offset: int = 0, - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] - - # standard deviation of the initial noise distribution - self.init_noise_sigma = 1.0 - - # For now we only support F-PNDM, i.e. the runge-kutta method - # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf - # mainly at formula (9), (12), (13) and the Algorithm 2. - self.pndm_order = 4 - - # running values - self.cur_model_output = 0 - self.counter = 0 - self.cur_sample = None - self.ets = [] - - # setable values - self.num_inference_steps = None - self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy() - self.prk_timesteps = None - self.plms_timesteps = None - self.timesteps = None - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - """ - - self.num_inference_steps = num_inference_steps - step_ratio = self.config.num_train_timesteps // self.num_inference_steps - # creates integer timesteps by multiplying by ratio - # casting to int to avoid issues when num_inference_step is power of 3 - self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round() - self._timesteps += self.config.steps_offset - - if self.config.skip_prk_steps: - # for some models like stable diffusion the prk steps can/should be skipped to - # produce better results. 
When using PNDM with `self.config.skip_prk_steps` the implementation - # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 - self.prk_timesteps = np.array([]) - self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[ - ::-1 - ].copy() - else: - prk_timesteps = np.array(self._timesteps[-self.pndm_order :]).repeat(2) + np.tile( - np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order - ) - self.prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1].copy() - self.plms_timesteps = self._timesteps[:-3][ - ::-1 - ].copy() # we copy to avoid having negative strides which are not supported by torch.from_numpy - - timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64) - self.timesteps = torch.from_numpy(timesteps).to(device) - - self.ets = [] - self.counter = 0 - - def step( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`. - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - - """ - if self.counter < len(self.prk_timesteps) and not self.config.skip_prk_steps: - return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) - else: - return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) - - def step_prk( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the - solution to the differential equation. - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is - True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
- - """ - if self.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2 - prev_timestep = timestep - diff_to_prev - timestep = self.prk_timesteps[self.counter // 4 * 4] - - if self.counter % 4 == 0: - self.cur_model_output += 1 / 6 * model_output - self.ets.append(model_output) - self.cur_sample = sample - elif (self.counter - 1) % 4 == 0: - self.cur_model_output += 1 / 3 * model_output - elif (self.counter - 2) % 4 == 0: - self.cur_model_output += 1 / 3 * model_output - elif (self.counter - 3) % 4 == 0: - model_output = self.cur_model_output + 1 / 6 * model_output - self.cur_model_output = 0 - - # cur_sample should not be `None` - cur_sample = self.cur_sample if self.cur_sample is not None else sample - - prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output) - self.counter += 1 - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def step_plms( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple - times to approximate the solution. - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is - True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - if self.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - if not self.config.skip_prk_steps and len(self.ets) < 3: - raise ValueError( - f"{self.__class__} can only be run AFTER scheduler has been run " - "in 'prk' mode for at least 12 iterations " - "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py " - "for more information." 
- ) - - prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps - - if self.counter != 1: - self.ets = self.ets[-3:] - self.ets.append(model_output) - else: - prev_timestep = timestep - timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps - - if len(self.ets) == 1 and self.counter == 0: - model_output = model_output - self.cur_sample = sample - elif len(self.ets) == 1 and self.counter == 1: - model_output = (model_output + self.ets[-1]) / 2 - sample = self.cur_sample - self.cur_sample = None - elif len(self.ets) == 2: - model_output = (3 * self.ets[-1] - self.ets[-2]) / 2 - elif len(self.ets) == 3: - model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 - else: - model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) - - prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output) - self.counter += 1 - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.FloatTensor`): input sample - - Returns: - `torch.FloatTensor`: scaled input sample - """ - return sample - - def _get_prev_sample(self, sample, timestep, prev_timestep, model_output): - # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf - # this function computes x_(t−δ) using the formula of (9) - # Note that x_t needs to be added to both sides of the equation - - # Notation ( -> - # alpha_prod_t -> α_t - # alpha_prod_t_prev -> α_(t−δ) - # beta_prod_t -> (1 - α_t) - # beta_prod_t_prev -> (1 - α_(t−δ)) - # sample -> x_t - # model_output -> e_θ(x_t, t) - # prev_sample -> x_(t−δ) - alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - if self.config.prediction_type == "v_prediction": - model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample - elif self.config.prediction_type != "epsilon": - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" - ) - - # corresponds to (α_(t−δ) - α_t) divided by - # denominator of x_t in formula (9) and plus 1 - # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) = - # sqrt(α_(t−δ)) / sqrt(α_t)) - sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) - - # corresponds to denominator of e_θ(x_t, t) in formula (9) - model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( - alpha_prod_t * beta_prod_t * alpha_prod_t_prev - ) ** (0.5) - - # full formula (9) - prev_sample = ( - sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff - ) - - return prev_sample - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.IntTensor, - ) -> torch.Tensor: - # Make sure alphas_cumprod and timestep have same device and dtype as original_samples - self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) - timesteps = timesteps.to(original_samples.device) - - sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 
0.5 - sqrt_alpha_prod = sqrt_alpha_prod.flatten() - while len(sqrt_alpha_prod.shape) < len(original_samples.shape): - sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) - - sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() - while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) - - noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Yiqin/ChatVID/model/fastchat/serve/api.py b/spaces/Yiqin/ChatVID/model/fastchat/serve/api.py deleted file mode 100644 index a5aeb579e5ad76e18c54b2663f2abc1f42d58160..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/fastchat/serve/api.py +++ /dev/null @@ -1,206 +0,0 @@ -"""This module provides a ChatGPT-compatible Restful API for chat completion. - -Usage: - -python3 -m fastchat.serve.api - -Reference: https://platform.openai.com/docs/api-reference/chat/create -""" -import asyncio -from typing import Union, Dict, List, Any - -import argparse -import json -import logging - -import fastapi -from fastapi.middleware.cors import CORSMiddleware -import httpx -import uvicorn -from pydantic import BaseSettings - -from fastchat.protocol.chat_completion import ( - ChatCompletionRequest, - ChatCompletionResponse, - ChatMessage, - ChatCompletionResponseChoice, -) -from fastchat.conversation import get_default_conv_template, SeparatorStyle -from fastchat.serve.inference import compute_skip_echo_len - -logger = logging.getLogger(__name__) - - -class AppSettings(BaseSettings): - # The address of the model controller. - FASTCHAT_CONTROLLER_URL: str = "http://localhost:21001" - - -app_settings = AppSettings() -app = fastapi.FastAPI() -headers = {"User-Agent": "FastChat API Server"} - - -@app.get("/v1/models") -async def show_available_models(): - controller_url = app_settings.FASTCHAT_CONTROLLER_URL - async with httpx.AsyncClient() as client: - ret = await client.post(controller_url + "/refresh_all_workers") - ret = await client.post(controller_url + "/list_models") - models = ret.json()["models"] - models.sort() - return {"data": [{"id": m} for m in models], "object": "list"} - - -@app.post("/v1/chat/completions") -async def create_chat_completion(request: ChatCompletionRequest): - """Creates a completion for the chat message""" - payload, skip_echo_len = generate_payload( - request.model, - request.messages, - temperature=request.temperature, - max_tokens=request.max_tokens, - stop=request.stop, - ) - - choices = [] - # TODO: batch the requests. 
maybe not necessary if using CacheFlow worker - chat_completions = [] - for i in range(request.n): - content = asyncio.create_task(chat_completion(request.model, payload, skip_echo_len)) - chat_completions.append(content) - - for i, content_task in enumerate(chat_completions): - content = await content_task - choices.append( - ChatCompletionResponseChoice( - index=i, - message=ChatMessage(role="assistant", content=content), - # TODO: support other finish_reason - finish_reason="stop", - ) - ) - - # TODO: support usage field - # "usage": { - # "prompt_tokens": 9, - # "completion_tokens": 12, - # "total_tokens": 21 - # } - return ChatCompletionResponse(choices=choices) - - -def generate_payload( - model_name: str, - messages: List[Dict[str, str]], - *, - temperature: float, - max_tokens: int, - stop: Union[str, None], -): - is_chatglm = "chatglm" in model_name.lower() - # TODO(suquark): The template is currently a reference. Here we have to make a copy. - # We use create a template factory to avoid this. - conv = get_default_conv_template(model_name).copy() - - # TODO(suquark): Conv.messages should be a list. But it is a tuple now. - # We should change it to a list. - conv.messages = list(conv.messages) - - for message in messages: - msg_role = message["role"] - if msg_role == "system": - conv.system = message["content"] - elif msg_role == "user": - conv.append_message(conv.roles[0], message["content"]) - elif msg_role == "assistant": - conv.append_message(conv.roles[1], message["content"]) - else: - raise ValueError(f"Unknown role: {msg_role}") - - # Add a blank message for the assistant. - conv.append_message(conv.roles[1], None) - - if is_chatglm: - prompt = conv.messages[conv.offset :] - else: - prompt = conv.get_prompt() - skip_echo_len = compute_skip_echo_len(model_name, conv, prompt) - - if stop is None: - stop = conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2 - - # TODO(suquark): We should get the default `max_new_tokens`` from the model. - if max_tokens is None: - max_tokens = 512 - - payload = { - "model": model_name, - "prompt": prompt, - "temperature": temperature, - "max_new_tokens": max_tokens, - "stop": stop, - } - - logger.debug(f"==== request ====\n{payload}") - return payload, skip_echo_len - - -async def chat_completion(model_name: str, payload: Dict[str, Any], skip_echo_len: int): - controller_url = app_settings.FASTCHAT_CONTROLLER_URL - async with httpx.AsyncClient() as client: - ret = await client.post( - controller_url + "/get_worker_address", json={"model": model_name} - ) - worker_addr = ret.json()["address"] - # No available worker - if worker_addr == "": - raise ValueError(f"No available worker for {model_name}") - - logger.debug(f"model_name: {model_name}, worker_addr: {worker_addr}") - - output = "" - delimiter = b"\0" - async with client.stream( - "POST", - worker_addr + "/worker_generate_stream", - headers=headers, - json=payload, - timeout=20, - ) as response: - content = await response.aread() - - for chunk in content.split(delimiter): - if not chunk: - continue - data = json.loads(chunk.decode()) - if data["error_code"] == 0: - output = data["text"][skip_echo_len:].strip() - - return output - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="FastChat ChatGPT-compatible Restful API server." 
- ) - parser.add_argument("--host", type=str, default="localhost", help="host name") - parser.add_argument("--port", type=int, default=8000, help="port number") - parser.add_argument("--allow-credentials", action="store_true", help="allow credentials") - parser.add_argument("--allowed-origins", type=json.loads, default=["*"], help="allowed origins") - parser.add_argument("--allowed-methods", type=json.loads, default=["*"], help="allowed methods") - parser.add_argument("--allowed-headers", type=json.loads, default=["*"], help="allowed headers") - - args = parser.parse_args() - - app.add_middleware( - CORSMiddleware, - allow_origins=args.allowed_origins, - allow_credentials=args.allow_credentials, - allow_methods=args.allowed_methods, - allow_headers=args.allowed_headers, - ) - - logger.debug(f"==== args ====\n{args}") - - uvicorn.run("fastchat.serve.api:app", host=args.host, port=args.port, reload=True) diff --git a/spaces/YuanMio/vits-uma-genshin-honkai/transforms.py b/spaces/YuanMio/vits-uma-genshin-honkai/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/YuanMio/vits-uma-genshin-honkai/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - 
unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * 
torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Yudha515/Rvc-Models/CHANGELOG.md b/spaces/Yudha515/Rvc-Models/CHANGELOG.md deleted file mode 100644 index 24fc214df236b40efead4b1585b01632d9658e9b..0000000000000000000000000000000000000000 --- a/spaces/Yudha515/Rvc-Models/CHANGELOG.md +++ /dev/null @@ -1,23 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -## [0.0.2a] - TBD - -Improved demo, fixed top p (thanks @jnordberg). - -Compressor tanh on output to avoid clipping with some style (especially piano). -Now repeating the conditioning periodically if it is too short. - -More options when launching Gradio app locally (thanks @ashleykleynhans). - -Testing out PyTorch 2.0 memory efficient attention. - -Added extended generation (infinite length) by slowly moving the windows. -Note that other implementations exist: https://github.com/camenduru/MusicGen-colab. - -## [0.0.1] - 2023-06-09 - -Initial release, with model evaluation only. 
diff --git a/spaces/Yuyang2022/Translation_yue_to_any/app.py b/spaces/Yuyang2022/Translation_yue_to_any/app.py deleted file mode 100644 index fcebd0e249bc01bf431d8b25d0bb160ecafe19cb..0000000000000000000000000000000000000000 --- a/spaces/Yuyang2022/Translation_yue_to_any/app.py +++ /dev/null @@ -1,306 +0,0 @@ -from transformers import pipeline -import tempfile -import gradio as gr -from neon_tts_plugin_coqui import CoquiTTS -import os -import time -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline -#from flores200_codes import flores_codes - -pipe = pipeline(model="Yuyang2022/yue") # change to "your-username/the-name-you-picked" -LANGUAGES = list(CoquiTTS.langs.keys()) -coquiTTS = CoquiTTS() - -def audio_tts(audio, language:str, lang): - text = pipe(audio)["text"] - text = translation("zho_Hant", lang, text) - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: - coquiTTS.get_tts(text, fp, speaker = {"language" : language}) - return fp.name - - -def load_models(): - # build model and tokenizer - model_name_dict = { - "nllb-distilled-600M": "facebook/nllb-200-distilled-600M", - } - - model_dict = {} - - for call_name, real_name in model_name_dict.items(): - print("\tLoading model: %s" % call_name) - model = AutoModelForSeq2SeqLM.from_pretrained(real_name) - tokenizer = AutoTokenizer.from_pretrained(real_name) - model_dict[call_name + "_model"] = model - model_dict[call_name + "_tokenizer"] = tokenizer - - return model_dict - - -def translation(source, target, text): - if len(model_dict) == 2: - model_name = "nllb-distilled-600M" - - start_time = time.time() - source = "zho_Hant" #flores_codes[source] - target = flores_codes[target] - - model = model_dict[model_name + "_model"] - tokenizer = model_dict[model_name + "_tokenizer"] - - translator = pipeline( - "translation", - model=model, - tokenizer=tokenizer, - src_lang=source, - tgt_lang=target, - ) - output = translator(text, max_length=400) - - end_time = time.time() - - output = output[0]["translation_text"] - result = { - "inference_time": end_time - start_time, - "source": source, - "target": target, - "result": output, - } - return output - - -if __name__ == "__main__": - #print("\tinit models") - - global model_dict - - model_dict = load_models() - - codes_as_string = '''Acehnese (Arabic script) ace_Arab -Acehnese (Latin script) ace_Latn -Mesopotamian Arabic acm_Arab -Ta’izzi-Adeni Arabic acq_Arab -Tunisian Arabic aeb_Arab -Afrikaans afr_Latn -South Levantine Arabic ajp_Arab -Akan aka_Latn -Amharic amh_Ethi -North Levantine Arabic apc_Arab -Modern Standard Arabic arb_Arab -Modern Standard Arabic (Romanized) arb_Latn -Najdi Arabic ars_Arab -Moroccan Arabic ary_Arab -Egyptian Arabic arz_Arab -Assamese asm_Beng -Asturian ast_Latn -Awadhi awa_Deva -Central Aymara ayr_Latn -South Azerbaijani azb_Arab -North Azerbaijani azj_Latn -Bashkir bak_Cyrl -Bambara bam_Latn -Balinese ban_Latn -Belarusian bel_Cyrl -Bemba bem_Latn -Bengali ben_Beng -Bhojpuri bho_Deva -Banjar (Arabic script) bjn_Arab -Banjar (Latin script) bjn_Latn -Standard Tibetan bod_Tibt -Bosnian bos_Latn -Buginese bug_Latn -Bulgarian bul_Cyrl -Catalan cat_Latn -Cebuano ceb_Latn -Czech ces_Latn -Chokwe cjk_Latn -Central Kurdish ckb_Arab -Crimean Tatar crh_Latn -Welsh cym_Latn -Danish dan_Latn -German deu_Latn -Southwestern Dinka dik_Latn -Dyula dyu_Latn -Dzongkha dzo_Tibt -Greek ell_Grek -English eng_Latn -Esperanto epo_Latn -Estonian est_Latn -Basque eus_Latn -Ewe ewe_Latn -Faroese fao_Latn -Fijian fij_Latn -Finnish fin_Latn -Fon fon_Latn 
-French fra_Latn -Friulian fur_Latn -Nigerian Fulfulde fuv_Latn -Scottish Gaelic gla_Latn -Irish gle_Latn -Galician glg_Latn -Guarani grn_Latn -Gujarati guj_Gujr -Haitian Creole hat_Latn -Hausa hau_Latn -Hebrew heb_Hebr -Hindi hin_Deva -Chhattisgarhi hne_Deva -Croatian hrv_Latn -Hungarian hun_Latn -Armenian hye_Armn -Igbo ibo_Latn -Ilocano ilo_Latn -Indonesian ind_Latn -Icelandic isl_Latn -Italian ita_Latn -Javanese jav_Latn -Japanese jpn_Jpan -Kabyle kab_Latn -Jingpho kac_Latn -Kamba kam_Latn -Kannada kan_Knda -Kashmiri (Arabic script) kas_Arab -Kashmiri (Devanagari script) kas_Deva -Georgian kat_Geor -Central Kanuri (Arabic script) knc_Arab -Central Kanuri (Latin script) knc_Latn -Kazakh kaz_Cyrl -Kabiyè kbp_Latn -Kabuverdianu kea_Latn -Khmer khm_Khmr -Kikuyu kik_Latn -Kinyarwanda kin_Latn -Kyrgyz kir_Cyrl -Kimbundu kmb_Latn -Northern Kurdish kmr_Latn -Kikongo kon_Latn -Korean kor_Hang -Lao lao_Laoo -Ligurian lij_Latn -Limburgish lim_Latn -Lingala lin_Latn -Lithuanian lit_Latn -Lombard lmo_Latn -Latgalian ltg_Latn -Luxembourgish ltz_Latn -Luba-Kasai lua_Latn -Ganda lug_Latn -Luo luo_Latn -Mizo lus_Latn -Standard Latvian lvs_Latn -Magahi mag_Deva -Maithili mai_Deva -Malayalam mal_Mlym -Marathi mar_Deva -Minangkabau (Arabic script) min_Arab -Minangkabau (Latin script) min_Latn -Macedonian mkd_Cyrl -Plateau Malagasy plt_Latn -Maltese mlt_Latn -Meitei (Bengali script) mni_Beng -Halh Mongolian khk_Cyrl -Mossi mos_Latn -Maori mri_Latn -Burmese mya_Mymr -Dutch nld_Latn -Norwegian Nynorsk nno_Latn -Norwegian Bokmål nob_Latn -Nepali npi_Deva -Northern Sotho nso_Latn -Nuer nus_Latn -Nyanja nya_Latn -Occitan oci_Latn -West Central Oromo gaz_Latn -Odia ory_Orya -Pangasinan pag_Latn -Eastern Panjabi pan_Guru -Papiamento pap_Latn -Western Persian pes_Arab -Polish pol_Latn -Portuguese por_Latn -Dari prs_Arab -Southern Pashto pbt_Arab -Ayacucho Quechua quy_Latn -Romanian ron_Latn -Rundi run_Latn -Russian rus_Cyrl -Sango sag_Latn -Sanskrit san_Deva -Santali sat_Olck -Sicilian scn_Latn -Shan shn_Mymr -Sinhala sin_Sinh -Slovak slk_Latn -Slovenian slv_Latn -Samoan smo_Latn -Shona sna_Latn -Sindhi snd_Arab -Somali som_Latn -Southern Sotho sot_Latn -Spanish spa_Latn -Tosk Albanian als_Latn -Sardinian srd_Latn -Serbian srp_Cyrl -Swati ssw_Latn -Sundanese sun_Latn -Swedish swe_Latn -Swahili swh_Latn -Silesian szl_Latn -Tamil tam_Taml -Tatar tat_Cyrl -Telugu tel_Telu -Tajik tgk_Cyrl -Tagalog tgl_Latn -Thai tha_Thai -Tigrinya tir_Ethi -Tamasheq (Latin script) taq_Latn -Tamasheq (Tifinagh script) taq_Tfng -Tok Pisin tpi_Latn -Tswana tsn_Latn -Tsonga tso_Latn -Turkmen tuk_Latn -Tumbuka tum_Latn -Turkish tur_Latn -Twi twi_Latn -Central Atlas Tamazight tzm_Tfng -Uyghur uig_Arab -Ukrainian ukr_Cyrl -Umbundu umb_Latn -Urdu urd_Arab -Northern Uzbek uzn_Latn -Venetian vec_Latn -Vietnamese vie_Latn -Waray war_Latn -Wolof wol_Latn -Xhosa xho_Latn -Eastern Yiddish ydd_Hebr -Yoruba yor_Latn -Yue Chinese yue_Hant -Chinese (Simplified) zho_Hans -Chinese (Traditional) zho_Hant -Standard Malay zsm_Latn -Zulu zul_Latn''' - - codes_as_string = codes_as_string.split('\n') - - flores_codes = {} - for code in codes_as_string: - lang, lang_code = code.split('\t') - flores_codes[lang] = lang_code - - - lang_codes = list(flores_codes.keys()) - - # define gradio demo - inputs = [gr.Audio(source="microphone", type="filepath"), - gr.Radio( - label="Target text Language", - choices=LANGUAGES, value="en"), - gr.inputs.Dropdown(lang_codes, default="English", label="Target text Language"),] - outputs = gr.Audio(label="Output") - - demo = 
gr.Interface(fn=audio_tts, inputs=inputs, outputs=outputs, - title="translation - speech to speech", - description="Realtime demo for speech translation.",) - - demo.launch() \ No newline at end of file diff --git a/spaces/Yuzu22/rvc-models/infer_pack/modules.py b/spaces/Yuzu22/rvc-models/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/Yuzu22/rvc-models/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - 
n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = 
torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/ZeroTwo3/WavJourney/code_generator.py b/spaces/ZeroTwo3/WavJourney/code_generator.py deleted file mode 100644 index 8a2192251d9df420a96430e602d5d115fac79047..0000000000000000000000000000000000000000 --- a/spaces/ZeroTwo3/WavJourney/code_generator.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import json5 -import utils - - -def check_json_script(data): - foreground_mandatory_attrs_map = { - 'music': ['vol', 'len', 'desc'], - 'sound_effect': ['vol', 'len', 'desc'], - 'speech': ['vol', 'text'] - } - background_mandatory_attrs_map = { - 'music': ['vol', 'desc'], - 'sound_effect': ['vol', 'desc'], - } - - def check_by_audio_type(audio, mandatory_attrs_map, audio_str): - if audio['audio_type'] not in mandatory_attrs_map: - raise ValueError('audio_type is not allowed in this layout, audio={audio_str}') - for attr_name in mandatory_attrs_map[audio['audio_type']]: - if attr_name not in audio: - raise ValueError(f'{attr_name} does not exist, audio={audio_str}') - - # Check json's format - for audio in data: - audio_str = json5.dumps(audio, indent=None) - if 'layout' not in audio: - raise ValueError(f'layout missing, audio={audio_str}') - elif 'audio_type' not in audio: - raise ValueError(f'audio_type missing, audio={audio_str}') - elif audio['layout'] == 'foreground': - check_by_audio_type(audio, foreground_mandatory_attrs_map, audio_str) - elif audio['layout'] == 'background': - if 'id' not in audio: - raise ValueError(f'id not in background audio, audio={audio_str}') - if 'action' not in audio: - raise ValueError(f'action not in background audio, audio={audio_str}') - if audio['action'] == 'begin': - check_by_audio_type(audio, background_mandatory_attrs_map, audio_str) - else: - if audio['action'] != 'end': - raise ValueError(f'Unknown action, audio={audio_str}') - else: - raise ValueError(f'Unknown layout, audio={audio_str}') - #except Exception as err: - # sys.stderr.write(f'PARSING ERROR: {err}, audio={json5.dumps(audio, indent=None)}\n') - # all_clear = False - - -def collect_and_check_audio_data(data): - fg_audio_id = 0 - fg_audios = [] - bg_audios = [] - # Collect all the foreground and background audio ids used to calculate background audio length later - for audio in data: - if audio['layout'] == 'foreground': - audio['id'] = fg_audio_id - fg_audios.append(audio) - fg_audio_id += 1 - else: # background - if audio['action'] == 'begin': - audio['begin_fg_audio_id'] = fg_audio_id - bg_audios.append(audio) - else: # ends - # find the backgound with the id, and update its 'end_fg_audio_id' - for bg_audio in bg_audios: - if bg_audio['id'] == audio['id'] and bg_audio['audio_type'] == audio['audio_type']: - bg_audio['end_fg_audio_id'] = fg_audio_id - break - - # check if all background audios are valid - for bg_audio in bg_audios: - if 'begin_fg_audio_id' not in bg_audio: - raise ValueError(f'begin of background missing, audio={bg_audio}') - elif 'end_fg_audio_id' not in bg_audio: - 
raise ValueError(f'end of background missing, audio={bg_audio}') - - if bg_audio['begin_fg_audio_id'] > bg_audio['end_fg_audio_id']: - raise ValueError(f'background audio ends before start, audio={bg_audio}') - elif bg_audio['begin_fg_audio_id'] == bg_audio['end_fg_audio_id']: - raise ValueError(f'background audio contains no foreground audio, audio={bg_audio}') - #except Exception as err: - # sys.stderr.write(f'ALIGNMENT ERROR: {err}, audio={bg_audio}\n') - # return None, None - - return fg_audios, bg_audios - - -class AudioCodeGenerator: - def __init__(self): - self.wav_counters = { - 'bg_sound_effect': 0, - 'bg_music': 0, - 'idle': 0, - 'fg_sound_effect': 0, - 'fg_music': 0, - 'fg_speech': 0, - } - self.code = '' - - def append_code(self, content): - self.code = f'{self.code}{content}\n' - - def generate_code(self, fg_audios, bg_audios, output_path, result_filename): - def get_wav_name(audio): - audio_type = audio['audio_type'] - layout = 'fg' if audio['layout'] == 'foreground' else 'bg' - wav_type = f'{layout}_{audio_type}' if layout else audio_type - desc = audio['text'] if 'text' in audio else audio['desc'] - desc = utils.text_to_abbrev_prompt(desc) - wav_filename = f'{wav_type}_{self.wav_counters[wav_type]}_{desc}.wav' - self.wav_counters[wav_type] += 1 - return wav_filename - - header = f''' -import os -import sys -import datetime - -from APIs import TTM, TTS, TTA, MIX, CAT, COMPUTE_LEN - - -fg_audio_lens = [] -wav_path = \"{output_path.absolute()}/audio\" -os.makedirs(wav_path, exist_ok=True) - -''' - self.append_code(header) - - fg_audio_wavs = [] - for fg_audio in fg_audios: - wav_name = get_wav_name(fg_audio) - if fg_audio['audio_type'] == 'sound_effect': - self.append_code(f'TTA(text=\"{fg_audio["desc"]}\", length={fg_audio["len"]}, volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"))') - elif fg_audio['audio_type'] == 'music': - self.append_code(f'TTM(text=\"{fg_audio["desc"]}\", length={fg_audio["len"]}, volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"))') - elif fg_audio['audio_type'] == 'speech': - npz_path = self.char_to_voice_map[fg_audio["character"]]["npz_path"] - npz_full_path = os.path.abspath(npz_path) if os.path.exists(npz_path) else npz_path - self.append_code(f'TTS(text=\"{fg_audio["text"]}\", speaker_id=\"{self.char_to_voice_map[fg_audio["character"]]["id"]}\", volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"), speaker_npz=\"{npz_full_path}\")') - fg_audio_wavs.append(wav_name) - self.append_code(f'fg_audio_lens.append(COMPUTE_LEN(os.path.join(wav_path, \"{wav_name}\")))\n') - - # cat all foreground audio together - self.append_code(f'fg_audio_wavs = []') - for wav_filename in fg_audio_wavs: - self.append_code(f'fg_audio_wavs.append(os.path.join(wav_path, \"{wav_filename}\"))') - self.append_code(f'CAT(wavs=fg_audio_wavs, out_wav=os.path.join(wav_path, \"foreground.wav\"))') - - bg_audio_wavs = [] - self.append_code(f'\nbg_audio_offsets = []') - for bg_audio in bg_audios: - wav_name = get_wav_name(bg_audio) - self.append_code(f'bg_audio_len = sum(fg_audio_lens[{bg_audio["begin_fg_audio_id"]}:{bg_audio["end_fg_audio_id"]}])') - self.append_code(f'bg_audio_offset = sum(fg_audio_lens[:{bg_audio["begin_fg_audio_id"]}])') - if bg_audio['audio_type'] == 'sound_effect': - self.append_code(f'TTA(text=\"{bg_audio["desc"]}\", volume={bg_audio["vol"]}, length=bg_audio_len, out_wav=os.path.join(wav_path, \"{wav_name}\"))') - elif bg_audio['audio_type'] == 'music': - 
self.append_code(f'TTM(text=\"{bg_audio["desc"]}\", volume={bg_audio["vol"]}, length=bg_audio_len, out_wav=os.path.join(wav_path, \"{wav_name}\"))') - else: - raise ValueError() - bg_audio_wavs.append(wav_name) - self.append_code(f'bg_audio_offsets.append(bg_audio_offset)\n') - self.append_code(f'bg_audio_wavs = []') - for wav_filename in bg_audio_wavs: - self.append_code(f'bg_audio_wavs.append(os.path.join(wav_path, \"{wav_filename}\"))') - - self.append_code(f'bg_audio_wav_offset_pairs = list(zip(bg_audio_wavs, bg_audio_offsets))') - self.append_code(f'bg_audio_wav_offset_pairs.append((os.path.join(wav_path, \"foreground.wav\"), 0))') - self.append_code(f'MIX(wavs=bg_audio_wav_offset_pairs, out_wav=os.path.join(wav_path, \"{result_filename}.wav\"))') - - - def init_char_to_voice_map(self, filename): - with open(filename, 'r') as file: - self.char_to_voice_map = json5.load(file) - - - def parse_and_generate(self, script_filename, char_to_voice_map_filename, output_path, result_filename='result'): - self.code = '' - self.init_char_to_voice_map(char_to_voice_map_filename) - - with open(script_filename, 'r') as file: - data = json5.load(file) - - check_json_script(data) - fg_audios, bg_audios = collect_and_check_audio_data(data) - self.generate_code(fg_audios, bg_audios, output_path, result_filename) - return self.code diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/upfirdn2d.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/upfirdn2d.py deleted file mode 100644 index c8bb2c3c949eed38a6465ed369fa881538dca010..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/upfirdn2d.py +++ /dev/null @@ -1,330 +0,0 @@ -# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501 - -# Copyright (c) 2021, NVIDIA Corporation. All rights reserved. -# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator -# Augmentation (ADA) -# ======================================================================= - -# 1. Definitions - -# "Licensor" means any person or entity that distributes its Work. - -# "Software" means the original work of authorship made available under -# this License. - -# "Work" means the Software and any additions to or derivative works of -# the Software that are made available under this License. - -# The terms "reproduce," "reproduction," "derivative works," and -# "distribution" have the meaning as provided under U.S. copyright law; -# provided, however, that for the purposes of this License, derivative -# works shall not include works that remain separable from, or merely -# link (or bind by name) to the interfaces of, the Work. - -# Works, including the Software, are "made available" under this License -# by including in or with the Work either (a) a copyright notice -# referencing the applicability of this License to the Work, or (b) a -# copy of this License. - -# 2. License Grants - -# 2.1 Copyright Grant. Subject to the terms and conditions of this -# License, each Licensor grants to you a perpetual, worldwide, -# non-exclusive, royalty-free, copyright license to reproduce, -# prepare derivative works of, publicly display, publicly perform, -# sublicense and distribute its Work and any resulting derivative -# works in any form. - -# 3. Limitations - -# 3.1 Redistribution. 
You may reproduce or distribute the Work only -# if (a) you do so under this License, (b) you include a complete -# copy of this License with your distribution, and (c) you retain -# without modification any copyright, patent, trademark, or -# attribution notices that are present in the Work. - -# 3.2 Derivative Works. You may specify that additional or different -# terms apply to the use, reproduction, and distribution of your -# derivative works of the Work ("Your Terms") only if (a) Your Terms -# provide that the use limitation in Section 3.3 applies to your -# derivative works, and (b) you identify the specific derivative -# works that are subject to Your Terms. Notwithstanding Your Terms, -# this License (including the redistribution requirements in Section -# 3.1) will continue to apply to the Work itself. - -# 3.3 Use Limitation. The Work and any derivative works thereof only -# may be used or intended for use non-commercially. Notwithstanding -# the foregoing, NVIDIA and its affiliates may use the Work and any -# derivative works commercially. As used herein, "non-commercially" -# means for research or evaluation purposes only. - -# 3.4 Patent Claims. If you bring or threaten to bring a patent claim -# against any Licensor (including any claim, cross-claim or -# counterclaim in a lawsuit) to enforce any patents that you allege -# are infringed by any Work, then your rights under this License from -# such Licensor (including the grant in Section 2.1) will terminate -# immediately. - -# 3.5 Trademarks. This License does not grant any rights to use any -# Licensor’s or its affiliates’ names, logos, or trademarks, except -# as necessary to reproduce the notices described in this License. - -# 3.6 Termination. If you violate any term of this License, then your -# rights under this License (including the grant in Section 2.1) will -# terminate immediately. - -# 4. Disclaimer of Warranty. - -# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR -# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER -# THIS LICENSE. - -# 5. Limitation of Liability. - -# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL -# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE -# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, -# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF -# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK -# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, -# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER -# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF -# THE POSSIBILITY OF SUCH DAMAGES. 
- -# ======================================================================= - -import torch -from torch.autograd import Function -from torch.nn import functional as F - -from annotator.uniformer.mmcv.utils import to_2tuple -from ..utils import ext_loader - -upfirdn2d_ext = ext_loader.load_ext('_ext', ['upfirdn2d']) - - -class UpFirDn2dBackward(Function): - - @staticmethod - def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, - in_size, out_size): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_ext.upfirdn2d( - grad_output, - grad_kernel, - up_x=down_x, - up_y=down_y, - down_x=up_x, - down_y=up_y, - pad_x0=g_pad_x0, - pad_x1=g_pad_x1, - pad_y0=g_pad_y0, - pad_y1=g_pad_y1) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], - in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], - ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_ext.upfirdn2d( - gradgrad_input, - kernel, - up_x=ctx.up_x, - up_y=ctx.up_y, - down_x=ctx.down_x, - down_y=ctx.down_y, - pad_x0=ctx.pad_x0, - pad_x1=ctx.pad_x1, - pad_y0=ctx.pad_y0, - pad_y1=ctx.pad_y1) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], - # ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], - ctx.out_size[0], ctx.out_size[1]) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_ext.upfirdn2d( - input, - kernel, - up_x=up_x, - up_y=up_y, - down_x=down_x, - down_y=down_y, - pad_x0=pad_x0, - pad_x1=pad_x1, - pad_y0=pad_y0, - pad_y1=pad_y1) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - """UpFRIDn for 2d features. 
- - UpFIRDn is short for upsample, apply FIR filter and downsample. More - details can be found in: - https://www.mathworks.com/help/signal/ref/upfirdn.html - - Args: - input (Tensor): Tensor with shape of (n, c, h, w). - kernel (Tensor): Filter kernel. - up (int | tuple[int], optional): Upsampling factor. If given a number, - we will use this factor for the both height and width side. - Defaults to 1. - down (int | tuple[int], optional): Downsampling factor. If given a - number, we will use this factor for the both height and width side. - Defaults to 1. - pad (tuple[int], optional): Padding for tensors, (x_pad, y_pad) or - (x_pad_0, x_pad_1, y_pad_0, y_pad_1). Defaults to (0, 0). - - Returns: - Tensor: Tensor after UpFIRDn. - """ - if input.device.type == 'cpu': - if len(pad) == 2: - pad = (pad[0], pad[1], pad[0], pad[1]) - - up = to_2tuple(up) - - down = to_2tuple(down) - - out = upfirdn2d_native(input, kernel, up[0], up[1], down[0], down[1], - pad[0], pad[1], pad[2], pad[3]) - else: - _up = to_2tuple(up) - - _down = to_2tuple(down) - - if len(pad) == 4: - _pad = pad - elif len(pad) == 2: - _pad = (pad[0], pad[1], pad[0], pad[1]) - - out = UpFirDn2d.apply(input, kernel, _up, _down, _pad) - - return out - - -def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, - pad_y0, pad_y1): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, - [0, 0, - max(pad_x0, 0), - max(pad_x1, 0), - max(pad_y0, 0), - max(pad_y1, 0)]) - out = out[:, - max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/utils/positional_encoding.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/utils/positional_encoding.py deleted file mode 100644 index 9bda2bbdbfcc28ba6304b6325ae556fa02554ac1..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/utils/positional_encoding.py +++ /dev/null @@ -1,150 +0,0 @@ -import math - -import torch -import torch.nn as nn -from mmcv.cnn import uniform_init - -from .builder import POSITIONAL_ENCODING - - -@POSITIONAL_ENCODING.register_module() -class SinePositionalEncoding(nn.Module): - """Position encoding with sine and cosine functions. - - See `End-to-End Object Detection with Transformers - `_ for details. - - Args: - num_feats (int): The feature dimension for each position - along x-axis or y-axis. Note the final returned dimension - for each position is 2 times of this value. - temperature (int, optional): The temperature used for scaling - the position embedding. 
Default 10000. - normalize (bool, optional): Whether to normalize the position - embedding. Default False. - scale (float, optional): A scale factor that scales the position - embedding. The scale will be used only when `normalize` is True. - Default 2*pi. - eps (float, optional): A value added to the denominator for - numerical stability. Default 1e-6. - """ - - def __init__(self, - num_feats, - temperature=10000, - normalize=False, - scale=2 * math.pi, - eps=1e-6): - super(SinePositionalEncoding, self).__init__() - if normalize: - assert isinstance(scale, (float, int)), 'when normalize is set,' \ - 'scale should be provided and in float or int type, ' \ - f'found {type(scale)}' - self.num_feats = num_feats - self.temperature = temperature - self.normalize = normalize - self.scale = scale - self.eps = eps - - def forward(self, mask): - """Forward function for `SinePositionalEncoding`. - - Args: - mask (Tensor): ByteTensor mask. Non-zero values representing - ignored positions, while zero values means valid positions - for this image. Shape [bs, h, w]. - - Returns: - pos (Tensor): Returned position embedding with shape - [bs, num_feats*2, h, w]. - """ - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale - dim_t = torch.arange( - self.num_feats, dtype=torch.float32, device=mask.device) - dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), - dim=4).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), - dim=4).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - def __repr__(self): - """str: a string that describes the module""" - repr_str = self.__class__.__name__ - repr_str += f'(num_feats={self.num_feats}, ' - repr_str += f'temperature={self.temperature}, ' - repr_str += f'normalize={self.normalize}, ' - repr_str += f'scale={self.scale}, ' - repr_str += f'eps={self.eps})' - return repr_str - - -@POSITIONAL_ENCODING.register_module() -class LearnedPositionalEncoding(nn.Module): - """Position embedding with learnable embedding weights. - - Args: - num_feats (int): The feature dimension for each position - along x-axis or y-axis. The final returned dimension for - each position is 2 times of this value. - row_num_embed (int, optional): The dictionary size of row embeddings. - Default 50. - col_num_embed (int, optional): The dictionary size of col embeddings. - Default 50. - """ - - def __init__(self, num_feats, row_num_embed=50, col_num_embed=50): - super(LearnedPositionalEncoding, self).__init__() - self.row_embed = nn.Embedding(row_num_embed, num_feats) - self.col_embed = nn.Embedding(col_num_embed, num_feats) - self.num_feats = num_feats - self.row_num_embed = row_num_embed - self.col_num_embed = col_num_embed - self.init_weights() - - def init_weights(self): - """Initialize the learnable weights.""" - uniform_init(self.row_embed) - uniform_init(self.col_embed) - - def forward(self, mask): - """Forward function for `LearnedPositionalEncoding`. - - Args: - mask (Tensor): ByteTensor mask. Non-zero values representing - ignored positions, while zero values means valid positions - for this image. Shape [bs, h, w]. 
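# Editor's note -- a minimal, hedged sketch of what the SinePositionalEncoding.forward
# method shown above computes, run on a toy 1x2x3 mask. All names and values here
# (toy_mask, num_feats, temperature) are illustrative assumptions, not part of the
# deleted mmdet module.
import torch

toy_mask = torch.zeros(1, 2, 3, dtype=torch.bool)            # no ignored positions
num_feats, temperature = 4, 10000
not_mask = ~toy_mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)            # 1..h along height
x_embed = not_mask.cumsum(2, dtype=torch.float32)            # 1..w along width
dim_t = torch.arange(num_feats, dtype=torch.float32)
dim_t = temperature ** (2 * (dim_t // 2) / num_feats)
pos_x = x_embed[..., None] / dim_t
pos_y = y_embed[..., None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
print(pos.shape)                                              # torch.Size([1, 8, 2, 3]) == [bs, 2*num_feats, h, w]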
- - Returns: - pos (Tensor): Returned position embedding with shape - [bs, num_feats*2, h, w]. - """ - h, w = mask.shape[-2:] - x = torch.arange(w, device=mask.device) - y = torch.arange(h, device=mask.device) - x_embed = self.col_embed(x) - y_embed = self.row_embed(y) - pos = torch.cat( - (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( - 1, w, 1)), - dim=-1).permute(2, 0, - 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) - return pos - - def __repr__(self): - """str: a string that describes the module""" - repr_str = self.__class__.__name__ - repr_str += f'(num_feats={self.num_feats}, ' - repr_str += f'row_num_embed={self.row_num_embed}, ' - repr_str += f'col_num_embed={self.col_num_embed})' - return repr_str diff --git a/spaces/adirik/kakao-brain-vit/backbone/.ipynb_checkpoints/vit_model-checkpoint.py b/spaces/adirik/kakao-brain-vit/backbone/.ipynb_checkpoints/vit_model-checkpoint.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akshatsanghvi/spam-email-detection/app.py b/spaces/akshatsanghvi/spam-email-detection/app.py deleted file mode 100644 index c587421a2e7d8db91bb502fc2562ba0cbf336e38..0000000000000000000000000000000000000000 --- a/spaces/akshatsanghvi/spam-email-detection/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr -from joblib import load - -model = load("Classifier.joblib") - -def pred(Email): - l = model.predict([Email]) - if l[0]==1: - return "Spam ⚠️" - else: - return "👍" - -iface = gr.Interface(fn=pred, inputs="text", outputs="text", allow_flagging="never", description="Enter Your Message Below :") -iface.launch() \ No newline at end of file diff --git a/spaces/aliabid94/AutoGPT/tests/test_json_parser.py b/spaces/aliabid94/AutoGPT/tests/test_json_parser.py deleted file mode 100644 index 41c90a6f66c0b0468f1443de80033cc4f268eca0..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/AutoGPT/tests/test_json_parser.py +++ /dev/null @@ -1,111 +0,0 @@ -import unittest - -import tests.context -from autogpt.json_utils.json_fix_llm import fix_and_parse_json - - -class TestParseJson(unittest.TestCase): - def test_valid_json(self): - # Test that a valid JSON string is parsed correctly - json_str = '{"name": "John", "age": 30, "city": "New York"}' - obj = fix_and_parse_json(json_str) - self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) - - def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt - json_str = '{"name": "John", "age": 30, "city": "New York",}' - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' - # Assert that this raises an exception: - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I suggest we start by browsing the 
repository to find any issues that we can fix. - -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "I suggest we start browsing the repository to find any issues that we can fix.", - "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", - "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", - "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", - "speak": "I will start browsing the repository to find any issues we can fix." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "I suggest we start browsing the repository to find any issues that we can fix.", - "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", - "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", - "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", - "speak": "I will start browsing the repository to find any issues we can fix.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. - -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. 
I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/all-things-vits/Attend-and-Excite/app.py b/spaces/all-things-vits/Attend-and-Excite/app.py deleted file mode 100644 index 67d64f8564dc91d5483e966efb5fe0d35dc818ea..0000000000000000000000000000000000000000 --- a/spaces/all-things-vits/Attend-and-Excite/app.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os - -import gradio as gr -import PIL.Image - -from model import Model - -DESCRIPTION = '''# Attend-and-Excite -This is a demo for [Attend-and-Excite](https://arxiv.org/abs/2301.13826). -Attend-and-Excite performs attention-based generative semantic guidance to mitigate subject neglect in Stable Diffusion. -Select a prompt and a set of indices matching the subjects you wish to strengthen (the `Check token indices` cell can help map between a word and its index). -''' - -model = Model() - - -def process_example( - prompt: str, - indices_to_alter_str: str, - seed: int, - apply_attend_and_excite: bool, -) -> tuple[list[tuple[int, str]], PIL.Image.Image]: - num_steps = 50 - guidance_scale = 7.5 - - token_table = model.get_token_table(prompt) - result = model.run(prompt, indices_to_alter_str, seed, - apply_attend_and_excite, num_steps, guidance_scale) - return token_table, result - - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - - with gr.Row(): - with gr.Column(): - prompt = gr.Text( - label='Prompt', - max_lines=1, - placeholder= - 'A pod of dolphins leaping out of the water in an ocean with a ship on the background' - ) - with gr.Accordion(label='Check token indices', open=False): - show_token_indices_button = gr.Button('Show token indices') - token_indices_table = gr.Dataframe(label='Token indices', - headers=['Index', 'Token'], - col_count=2) - token_indices_str = gr.Text( - label= - 'Token indices (a comma-separated list indices of the tokens you wish to alter)', - max_lines=1, - placeholder='4,16') - seed = gr.Slider(label='Seed', - minimum=0, - maximum=100000, - value=0, - step=1) - apply_attend_and_excite = gr.Checkbox( - label='Apply Attend-and-Excite', value=True) - num_steps = gr.Slider(label='Number of steps', - minimum=0, - maximum=100, - step=1, - value=50) - guidance_scale = gr.Slider(label='CFG scale', - minimum=0, - maximum=50, - step=0.1, - value=7.5) - run_button = gr.Button('Generate') - with gr.Column(): - result = gr.Image(label='Result') - - with gr.Row(): - examples = [ - [ - 'A mouse and a red car', - '2,6', - 2098, - True, - ], - [ - 'A mouse and a red car', - '2,6', - 2098, - False, - ], - [ - 'A horse and a dog', - '2,5', - 123, - True, - ], - [ - 'A horse and a dog', - '2,5', - 123, - False, - ], - [ - 'A painting of an elephant with glasses', - '5,7', - 123, - True, - ], - [ - 'A painting of an elephant with glasses', - '5,7', - 123, - False, - ], - [ - 'A playful kitten chasing a butterfly in a wildflower meadow', - '3,6,10', - 123, - True, - ], - [ - 'A playful kitten chasing a butterfly in a wildflower meadow', - '3,6,10', - 123, - False, - ], - [ - 
'A grizzly bear catching a salmon in a crystal clear river surrounded by a forest', - '2,6,15', - 123, - True, - ], - [ - 'A grizzly bear catching a salmon in a crystal clear river surrounded by a forest', - '2,6,15', - 123, - False, - ], - [ - 'A pod of dolphins leaping out of the water in an ocean with a ship on the background', - '4,16', - 123, - True, - ], - [ - 'A pod of dolphins leaping out of the water in an ocean with a ship on the background', - '4,16', - 123, - False, - ], - ] - gr.Examples(examples=examples, - inputs=[ - prompt, - token_indices_str, - seed, - apply_attend_and_excite, - ], - outputs=[ - token_indices_table, - result, - ], - fn=process_example, - cache_examples=os.getenv('CACHE_EXAMPLES') == '1', - examples_per_page=20) - - show_token_indices_button.click( - fn=model.get_token_table, - inputs=prompt, - outputs=token_indices_table, - queue=False, - ) - - inputs = [ - prompt, - token_indices_str, - seed, - apply_attend_and_excite, - num_steps, - guidance_scale, - ] - prompt.submit( - fn=model.get_token_table, - inputs=prompt, - outputs=token_indices_table, - queue=False, - ).then( - fn=model.run, - inputs=inputs, - outputs=result, - ) - token_indices_str.submit( - fn=model.get_token_table, - inputs=prompt, - outputs=token_indices_table, - queue=False, - ).then( - fn=model.run, - inputs=inputs, - outputs=result, - ) - run_button.click( - fn=model.get_token_table, - inputs=prompt, - outputs=token_indices_table, - queue=False, - ).then( - fn=model.run, - inputs=inputs, - outputs=result, - api_name='run', - ) - -demo.queue(max_size=10).launch() diff --git a/spaces/allknowingroger/Image-Models-Test137/README.md b/spaces/allknowingroger/Image-Models-Test137/README.md deleted file mode 100644 index b14a9693677a7a0101d9f1a18ead75e3e08f6f4c..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test137/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -duplicated_from: allknowingroger/Image-Models-Test136 ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test30/README.md b/spaces/allknowingroger/Image-Models-Test30/README.md deleted file mode 100644 index eccdbb9a0740c4edfd69cba1efd65c2bd20880aa..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test30/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test27 ---- - - \ No newline at end of file diff --git a/spaces/almakedon/faster-whisper-webui/app-shared.py b/spaces/almakedon/faster-whisper-webui/app-shared.py deleted file mode 100644 index 63cac1a8adaf90784c5f5f178f86243ad2149ee4..0000000000000000000000000000000000000000 --- a/spaces/almakedon/faster-whisper-webui/app-shared.py +++ /dev/null @@ -1,5 +0,0 @@ -# Run the app with no audio file restrictions -from app import create_ui -from src.config import ApplicationConfig - -create_ui(ApplicationConfig.create_default(input_audio_max_duration=-1, share=True)) \ No newline at end of file diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_write_stop_hang_illegal.c b/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_write_stop_hang_illegal.c deleted file mode 100644 index 3d53d4f7aa96b0e7e8fb2bb6aae32311ff6c9342..0000000000000000000000000000000000000000 --- 
a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_write_stop_hang_illegal.c +++ /dev/null @@ -1,168 +0,0 @@ -/** @file patest_write_stop_threads.c - @brief Call Pa_StopStream() from another thread to see if PortAudio hangs. - @author Bjorn Roche of XO Audio (www.xoaudio.com) - @author Ross Bencina - @author Phil Burk -*/ -/* - * $Id$ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com/ - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include -#include -/* pthread may only be available on Mac and Linux. */ -#include -#include "portaudio.h" - -#define SAMPLE_RATE (44100) -#define FRAMES_PER_BUFFER (2048) - -static float s_buffer[FRAMES_PER_BUFFER][2]; /* stereo output buffer */ - -/** - * WARNING: PortAudio is NOT thread safe. DO NOT call PortAudio - * from multiple threads without synchronization. This test uses - * PA in an ILLEGAL WAY in order to try to flush out potential hang bugs. - * The test calls Pa_WriteStream() and Pa_StopStream() simultaneously - * from separate threads in order to try to cause Pa_StopStream() to hang. - * In the main thread we write to the stream in a loop. - * Then try stopping PA from another thread to see if it hangs. - * - * @note: Do not expect this test to pass. The test is only here - * as a debugging aid for hang bugs. Since this test uses PA in an - * illegal way, it may fail for reasons that are not PA bugs. - */ - -/* Wait awhile then abort the stream. 
*/ -void *stop_thread_proc(void *arg) -{ - PaStream *stream = (PaStream *)arg; - PaTime time; - for (int i = 0; i < 20; i++) - { - /* ILLEGAL unsynchronised call to PA, see comment above */ - time = Pa_GetStreamTime( stream ); - printf("Stream time = %f\n", time); - fflush(stdout); - usleep(100 * 1000); - } - printf("Call Pa_StopStream()\n"); - fflush(stdout); - /* ILLEGAL unsynchronised call to PA, see comment above */ - PaError err = Pa_StopStream( stream ); - printf("Pa_StopStream() returned %d\n", err); - fflush(stdout); - - return stream; -} - -int main(void); -int main(void) -{ - PaStreamParameters outputParameters; - PaStream *stream; - PaError err; - int result; - pthread_t thread; - - printf( "PortAudio Test: output silence and stop from another thread. SR = %d, BufSize = %d\n", - SAMPLE_RATE, FRAMES_PER_BUFFER); - - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - outputParameters.device = Pa_GetDefaultOutputDevice(); /* default output device */ - outputParameters.channelCount = 2; /* stereo output */ - outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */ - outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultHighOutputLatency * 5; - outputParameters.hostApiSpecificStreamInfo = NULL; - - /* open the stream */ - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - NULL, /* no callback, use blocking API */ - NULL ); /* no callback, so no callback userData */ - if( err != paNoError ) goto error; - - result = pthread_create(&thread, NULL /* attributes */, stop_thread_proc, stream); - - /* start the stream */ - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - /* clear buffer */ - memset( s_buffer, 0, sizeof(s_buffer) ); - - /* play the silent buffer many times */ - while( Pa_IsStreamActive(stream) > 0 ) - { - err = Pa_WriteStream( stream, s_buffer, FRAMES_PER_BUFFER ); - printf("Pa_WriteStream returns %d = %s\n", err, Pa_GetErrorText( err )); - if( err != paNoError ) - { - err = paNoError; - break; - }; - } - - printf("Try to join the thread that called Pa_StopStream().\n"); - result = pthread_join( thread, NULL ); - printf("pthread_join returned %d\n", result); - - /* close, and terminate */ - printf("Call Pa_CloseStream\n"); - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("Test finished.\n"); - - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/amsterdamNLP/contrastive-pairs/descr-2.md b/spaces/amsterdamNLP/contrastive-pairs/descr-2.md deleted file mode 100644 index 2ba65dda35b58985c24e7520097de180f4e7ab99..0000000000000000000000000000000000000000 --- a/spaces/amsterdamNLP/contrastive-pairs/descr-2.md +++ /dev/null @@ -1,3 +0,0 @@ -Or you can enter a pair of sentences in the entries below, and click `Run` to get the result for your manual pair. 
- -*The colors indicate whether the stereotypical or the less stereotypical examples gets the higher score, the intensity of the color how strong the preference is.* diff --git a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/__init__.py b/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/aritheanalyst/legalsummarizer/style.css b/spaces/aritheanalyst/legalsummarizer/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/aritheanalyst/legalsummarizer/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/configs/shared_configs.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/configs/shared_configs.py deleted file mode 100644 index a558cfcabbc2abc26be60065d3ac75cebd829f28..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/configs/shared_configs.py +++ /dev/null @@ -1,182 +0,0 @@ -from dataclasses import dataclass, field - -from TTS.config import BaseAudioConfig, BaseTrainingConfig - - -@dataclass -class BaseVocoderConfig(BaseTrainingConfig): - """Shared parameters among all the vocoder models. - Args: - audio (BaseAudioConfig): - Audio processor config instance. Defaultsto `BaseAudioConfig()`. - use_noise_augment (bool): - Augment the input audio with random noise. Defaults to False/ - eval_split_size (int): - Number of instances used for evaluation. Defaults to 10. - data_path (str): - Root path of the training data. All the audio files found recursively from this root path are used for - training. Defaults to `""`. - feature_path (str): - Root path to the precomputed feature files. Defaults to None. - seq_len (int): - Length of the waveform segments used for training. Defaults to 1000. - pad_short (int): - Extra padding for the waveforms shorter than `seq_len`. Defaults to 0. - conv_path (int): - Extra padding for the feature frames against convolution of the edge frames. Defaults to MISSING. - Defaults to 0. - use_cache (bool): - enable / disable in memory caching of the computed features. If the RAM is not enough, if may cause OOM. - Defaults to False. - epochs (int): - Number of training epochs to. Defaults to 10000. - wd (float): - Weight decay. - optimizer (torch.optim.Optimizer): - Optimizer used for the training. Defaults to `AdamW`. - optimizer_params (dict): - Optimizer kwargs. Defaults to `{"betas": [0.8, 0.99], "weight_decay": 0.0}` - """ - - audio: BaseAudioConfig = field(default_factory=BaseAudioConfig) - # dataloading - use_noise_augment: bool = False # enable/disable random noise augmentation in spectrograms. - eval_split_size: int = 10 # number of samples used for evaluation. - # dataset - data_path: str = "" # root data path. It finds all wav files recursively from there. - feature_path: str = None # if you use precomputed features - seq_len: int = 1000 # signal length used in training. 
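# Editor's note -- a hedged sketch of how a string `optimizer` field plus an
# `optimizer_params` dict (as documented in the BaseVocoderConfig docstring above)
# is commonly resolved into a real torch optimizer. `build_optimizer` and
# `toy_model` are illustrative names only, not part of the deleted TTS trainer.
import torch

def build_optimizer(model, optimizer_name="AdamW", optimizer_params=None, lr=1e-4):
    params = optimizer_params or {"betas": [0.8, 0.99], "weight_decay": 0.0}
    optimizer_class = getattr(torch.optim, optimizer_name)    # e.g. torch.optim.AdamW
    return optimizer_class(model.parameters(), lr=lr, **params)

toy_model = torch.nn.Linear(4, 4)
print(type(build_optimizer(toy_model)).__name__)              # AdamW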
- pad_short: int = 0 # additional padding for short wavs - conv_pad: int = 0 # additional padding against convolutions applied to spectrograms - use_cache: bool = False # use in memory cache to keep the computed features. This might cause OOM. - # OPTIMIZER - epochs: int = 10000 # total number of epochs to train. - wd: float = 0.0 # Weight decay weight. - optimizer: str = "AdamW" - optimizer_params: dict = field(default_factory=lambda: {"betas": [0.8, 0.99], "weight_decay": 0.0}) - - -@dataclass -class BaseGANVocoderConfig(BaseVocoderConfig): - """Base config class used among all the GAN based vocoders. - Args: - use_stft_loss (bool): - enable / disable the use of STFT loss. Defaults to True. - use_subband_stft_loss (bool): - enable / disable the use of Subband STFT loss. Defaults to True. - use_mse_gan_loss (bool): - enable / disable the use of Mean Squared Error based GAN loss. Defaults to True. - use_hinge_gan_loss (bool): - enable / disable the use of Hinge GAN loss. Defaults to True. - use_feat_match_loss (bool): - enable / disable feature matching loss. Defaults to True. - use_l1_spec_loss (bool): - enable / disable L1 spectrogram loss. Defaults to True. - stft_loss_weight (float): - Loss weight that multiplies the computed loss value. Defaults to 0. - subband_stft_loss_weight (float): - Loss weight that multiplies the computed loss value. Defaults to 0. - mse_G_loss_weight (float): - Loss weight that multiplies the computed loss value. Defaults to 1. - hinge_G_loss_weight (float): - Loss weight that multiplies the computed loss value. Defaults to 0. - feat_match_loss_weight (float): - Loss weight that multiplies the computed loss value. Defaults to 100. - l1_spec_loss_weight (float): - Loss weight that multiplies the computed loss value. Defaults to 45. - stft_loss_params (dict): - Parameters for the STFT loss. Defaults to `{"n_ffts": [1024, 2048, 512], "hop_lengths": [120, 240, 50], "win_lengths": [600, 1200, 240]}`. - l1_spec_loss_params (dict): - Parameters for the L1 spectrogram loss. Defaults to - `{ - "use_mel": True, - "sample_rate": 22050, - "n_fft": 1024, - "hop_length": 256, - "win_length": 1024, - "n_mels": 80, - "mel_fmin": 0.0, - "mel_fmax": None, - }` - target_loss (str): - Target loss name that defines the quality of the model. Defaults to `G_avg_loss`. - grad_clip (list): - A list of gradient clipping theresholds for each optimizer. Any value less than 0 disables clipping. - Defaults to [5, 5]. - lr_gen (float): - Generator model initial learning rate. Defaults to 0.0002. - lr_disc (float): - Discriminator model initial learning rate. Defaults to 0.0002. - lr_scheduler_gen (torch.optim.Scheduler): - Learning rate scheduler for the generator. Defaults to `ExponentialLR`. - lr_scheduler_gen_params (dict): - Parameters for the generator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`. - lr_scheduler_disc (torch.optim.Scheduler): - Learning rate scheduler for the discriminator. Defaults to `ExponentialLR`. - lr_scheduler_disc_params (dict): - Parameters for the discriminator learning rate scheduler. Defaults to `{"gamma": 0.999, "last_epoch": -1}`. - scheduler_after_epoch (bool): - Whether to update the learning rate schedulers after each epoch. Defaults to True. - use_pqmf (bool): - enable / disable PQMF for subband approximation at training. Defaults to False. - steps_to_start_discriminator (int): - Number of steps required to start training the discriminator. Defaults to 0. 
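# Editor's note -- a hedged sketch of how the multi-resolution `stft_loss_params`
# described above are typically consumed: one torch.stft call per
# (n_fft, hop_length, win_length) triple. Purely illustrative; this is not the
# deleted loss implementation.
import torch

stft_loss_params = {
    "n_ffts": [1024, 2048, 512],
    "hop_lengths": [120, 240, 50],
    "win_lengths": [600, 1200, 240],
}
wav = torch.randn(1, 22050)                                   # one second of fake audio
for n_fft, hop, win in zip(stft_loss_params["n_ffts"],
                           stft_loss_params["hop_lengths"],
                           stft_loss_params["win_lengths"]):
    spec = torch.stft(wav, n_fft, hop_length=hop, win_length=win,
                      window=torch.hann_window(win), return_complex=True)
    print(n_fft, tuple(spec.shape))                           # (1, n_fft // 2 + 1, num_frames)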
- diff_samples_for_G_and_D (bool): - enable / disable use of different training samples for the generator and the discriminator iterations. - Enabling it results in slower iterations but faster convergance in some cases. Defaults to False. - """ - - model: str = "gan" - - # LOSS PARAMETERS - use_stft_loss: bool = True - use_subband_stft_loss: bool = True - use_mse_gan_loss: bool = True - use_hinge_gan_loss: bool = True - use_feat_match_loss: bool = True # requires MelGAN Discriminators (MelGAN and HifiGAN) - use_l1_spec_loss: bool = True - - # loss weights - stft_loss_weight: float = 0 - subband_stft_loss_weight: float = 0 - mse_G_loss_weight: float = 1 - hinge_G_loss_weight: float = 0 - feat_match_loss_weight: float = 100 - l1_spec_loss_weight: float = 45 - - stft_loss_params: dict = field( - default_factory=lambda: { - "n_ffts": [1024, 2048, 512], - "hop_lengths": [120, 240, 50], - "win_lengths": [600, 1200, 240], - } - ) - - l1_spec_loss_params: dict = field( - default_factory=lambda: { - "use_mel": True, - "sample_rate": 22050, - "n_fft": 1024, - "hop_length": 256, - "win_length": 1024, - "n_mels": 80, - "mel_fmin": 0.0, - "mel_fmax": None, - } - ) - - target_loss: str = "loss_0" # loss value to pick the best model to save after each epoch - - # optimizer - grad_clip: float = field(default_factory=lambda: [5, 5]) - lr_gen: float = 0.0002 # Initial learning rate. - lr_disc: float = 0.0002 # Initial learning rate. - lr_scheduler_gen: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html - lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1}) - lr_scheduler_disc: str = "ExponentialLR" # one of the schedulers from https:#pytorch.org/docs/stable/optim.html - lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1}) - scheduler_after_epoch: bool = True - - use_pqmf: bool = False # enable/disable using pqmf for multi-band training. (Multi-band MelGAN) - steps_to_start_discriminator = 0 # start training the discriminator after this number of steps. - diff_samples_for_G_and_D: bool = False # use different samples for G and D training steps. diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/__init__.py deleted file mode 100644 index b407fecac19f80171ed45b9ded08d8b145eed92a..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/__init__.py +++ /dev/null @@ -1,216 +0,0 @@ -__version__ = "3.8.3" - -from typing import Tuple - -from . 
import hdrs as hdrs -from .client import ( - BaseConnector as BaseConnector, - ClientConnectionError as ClientConnectionError, - ClientConnectorCertificateError as ClientConnectorCertificateError, - ClientConnectorError as ClientConnectorError, - ClientConnectorSSLError as ClientConnectorSSLError, - ClientError as ClientError, - ClientHttpProxyError as ClientHttpProxyError, - ClientOSError as ClientOSError, - ClientPayloadError as ClientPayloadError, - ClientProxyConnectionError as ClientProxyConnectionError, - ClientRequest as ClientRequest, - ClientResponse as ClientResponse, - ClientResponseError as ClientResponseError, - ClientSession as ClientSession, - ClientSSLError as ClientSSLError, - ClientTimeout as ClientTimeout, - ClientWebSocketResponse as ClientWebSocketResponse, - ContentTypeError as ContentTypeError, - Fingerprint as Fingerprint, - InvalidURL as InvalidURL, - NamedPipeConnector as NamedPipeConnector, - RequestInfo as RequestInfo, - ServerConnectionError as ServerConnectionError, - ServerDisconnectedError as ServerDisconnectedError, - ServerFingerprintMismatch as ServerFingerprintMismatch, - ServerTimeoutError as ServerTimeoutError, - TCPConnector as TCPConnector, - TooManyRedirects as TooManyRedirects, - UnixConnector as UnixConnector, - WSServerHandshakeError as WSServerHandshakeError, - request as request, -) -from .cookiejar import CookieJar as CookieJar, DummyCookieJar as DummyCookieJar -from .formdata import FormData as FormData -from .helpers import BasicAuth, ChainMapProxy, ETag -from .http import ( - HttpVersion as HttpVersion, - HttpVersion10 as HttpVersion10, - HttpVersion11 as HttpVersion11, - WebSocketError as WebSocketError, - WSCloseCode as WSCloseCode, - WSMessage as WSMessage, - WSMsgType as WSMsgType, -) -from .multipart import ( - BadContentDispositionHeader as BadContentDispositionHeader, - BadContentDispositionParam as BadContentDispositionParam, - BodyPartReader as BodyPartReader, - MultipartReader as MultipartReader, - MultipartWriter as MultipartWriter, - content_disposition_filename as content_disposition_filename, - parse_content_disposition as parse_content_disposition, -) -from .payload import ( - PAYLOAD_REGISTRY as PAYLOAD_REGISTRY, - AsyncIterablePayload as AsyncIterablePayload, - BufferedReaderPayload as BufferedReaderPayload, - BytesIOPayload as BytesIOPayload, - BytesPayload as BytesPayload, - IOBasePayload as IOBasePayload, - JsonPayload as JsonPayload, - Payload as Payload, - StringIOPayload as StringIOPayload, - StringPayload as StringPayload, - TextIOPayload as TextIOPayload, - get_payload as get_payload, - payload_type as payload_type, -) -from .payload_streamer import streamer as streamer -from .resolver import ( - AsyncResolver as AsyncResolver, - DefaultResolver as DefaultResolver, - ThreadedResolver as ThreadedResolver, -) -from .streams import ( - EMPTY_PAYLOAD as EMPTY_PAYLOAD, - DataQueue as DataQueue, - EofStream as EofStream, - FlowControlDataQueue as FlowControlDataQueue, - StreamReader as StreamReader, -) -from .tracing import ( - TraceConfig as TraceConfig, - TraceConnectionCreateEndParams as TraceConnectionCreateEndParams, - TraceConnectionCreateStartParams as TraceConnectionCreateStartParams, - TraceConnectionQueuedEndParams as TraceConnectionQueuedEndParams, - TraceConnectionQueuedStartParams as TraceConnectionQueuedStartParams, - TraceConnectionReuseconnParams as TraceConnectionReuseconnParams, - TraceDnsCacheHitParams as TraceDnsCacheHitParams, - TraceDnsCacheMissParams as TraceDnsCacheMissParams, - 
TraceDnsResolveHostEndParams as TraceDnsResolveHostEndParams, - TraceDnsResolveHostStartParams as TraceDnsResolveHostStartParams, - TraceRequestChunkSentParams as TraceRequestChunkSentParams, - TraceRequestEndParams as TraceRequestEndParams, - TraceRequestExceptionParams as TraceRequestExceptionParams, - TraceRequestRedirectParams as TraceRequestRedirectParams, - TraceRequestStartParams as TraceRequestStartParams, - TraceResponseChunkReceivedParams as TraceResponseChunkReceivedParams, -) - -__all__: Tuple[str, ...] = ( - "hdrs", - # client - "BaseConnector", - "ClientConnectionError", - "ClientConnectorCertificateError", - "ClientConnectorError", - "ClientConnectorSSLError", - "ClientError", - "ClientHttpProxyError", - "ClientOSError", - "ClientPayloadError", - "ClientProxyConnectionError", - "ClientResponse", - "ClientRequest", - "ClientResponseError", - "ClientSSLError", - "ClientSession", - "ClientTimeout", - "ClientWebSocketResponse", - "ContentTypeError", - "Fingerprint", - "InvalidURL", - "RequestInfo", - "ServerConnectionError", - "ServerDisconnectedError", - "ServerFingerprintMismatch", - "ServerTimeoutError", - "TCPConnector", - "TooManyRedirects", - "UnixConnector", - "NamedPipeConnector", - "WSServerHandshakeError", - "request", - # cookiejar - "CookieJar", - "DummyCookieJar", - # formdata - "FormData", - # helpers - "BasicAuth", - "ChainMapProxy", - "ETag", - # http - "HttpVersion", - "HttpVersion10", - "HttpVersion11", - "WSMsgType", - "WSCloseCode", - "WSMessage", - "WebSocketError", - # multipart - "BadContentDispositionHeader", - "BadContentDispositionParam", - "BodyPartReader", - "MultipartReader", - "MultipartWriter", - "content_disposition_filename", - "parse_content_disposition", - # payload - "AsyncIterablePayload", - "BufferedReaderPayload", - "BytesIOPayload", - "BytesPayload", - "IOBasePayload", - "JsonPayload", - "PAYLOAD_REGISTRY", - "Payload", - "StringIOPayload", - "StringPayload", - "TextIOPayload", - "get_payload", - "payload_type", - # payload_streamer - "streamer", - # resolver - "AsyncResolver", - "DefaultResolver", - "ThreadedResolver", - # streams - "DataQueue", - "EMPTY_PAYLOAD", - "EofStream", - "FlowControlDataQueue", - "StreamReader", - # tracing - "TraceConfig", - "TraceConnectionCreateEndParams", - "TraceConnectionCreateStartParams", - "TraceConnectionQueuedEndParams", - "TraceConnectionQueuedStartParams", - "TraceConnectionReuseconnParams", - "TraceDnsCacheHitParams", - "TraceDnsCacheMissParams", - "TraceDnsResolveHostEndParams", - "TraceDnsResolveHostStartParams", - "TraceRequestChunkSentParams", - "TraceRequestEndParams", - "TraceRequestExceptionParams", - "TraceRequestRedirectParams", - "TraceRequestStartParams", - "TraceResponseChunkReceivedParams", -) - -try: - from .worker import GunicornUVLoopWebWorker, GunicornWebWorker - - __all__ += ("GunicornWebWorker", "GunicornUVLoopWebWorker") -except ImportError: # pragma: no cover - pass diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/violin_plot.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/violin_plot.py deleted file mode 100644 index 66cab47006ac6422a87846bc55bd0cdcad975a7f..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/violin_plot.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Violin Plot ------------ -This example shows how to make a Violin Plot using Altair's density transform. 
-""" -# category: other charts -import altair as alt -from vega_datasets import data - -alt.Chart(data.cars()).transform_density( - 'Miles_per_Gallon', - as_=['Miles_per_Gallon', 'density'], - extent=[5, 50], - groupby=['Origin'] -).mark_area(orient='horizontal').encode( - y='Miles_per_Gallon:Q', - color='Origin:N', - x=alt.X( - 'density:Q', - stack='center', - impute=None, - title=None, - axis=alt.Axis(labels=False, values=[0],grid=False, ticks=True), - ), - column=alt.Column( - 'Origin:N', - header=alt.Header( - titleOrient='bottom', - labelOrient='bottom', - labelPadding=0, - ), - ) -).properties( - width=100 -).configure_facet( - spacing=0 -).configure_view( - stroke=None -) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/display.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/display.py deleted file mode 100644 index 3e7f39a603b9c442d791c7a49a2314af4502a9ab..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/display.py +++ /dev/null @@ -1,110 +0,0 @@ -import os - -from ...utils import PluginRegistry -from ..display import Displayable -from ..display import default_renderer_base -from ..display import json_renderer_base -from ..display import RendererType -from ..display import HTMLRenderer - -from .schema import SCHEMA_VERSION - -VEGA_VERSION = SCHEMA_VERSION.lstrip("v") -VEGAEMBED_VERSION = "3" - - -# ============================================================================== -# Vega 5 renderer logic -# ============================================================================== - - -# The MIME type for Vega 5 releases. -VEGA_MIME_TYPE = "application/vnd.vega.v5+json" # type: str - -# The entry point group that can be used by other packages to declare other -# renderers that will be auto-detected. Explicit registration is also -# allowed by the PluginRegistery API. -ENTRY_POINT_GROUP = "altair.vega.v5.renderer" # type: str - -# The display message when rendering fails -DEFAULT_DISPLAY = """\ - - -If you see this message, it means the renderer has not been properly enabled -for the frontend that you are using. 
For more information, see -https://altair-viz.github.io/user_guide/troubleshooting.html -""" - -renderers = PluginRegistry[RendererType](entry_point_group=ENTRY_POINT_GROUP) - - -here = os.path.dirname(os.path.realpath(__file__)) - - -def default_renderer(spec): - return default_renderer_base(spec, VEGA_MIME_TYPE, DEFAULT_DISPLAY) - - -def json_renderer(spec): - return json_renderer_base(spec, DEFAULT_DISPLAY) - - -colab_renderer = HTMLRenderer( - mode="vega", - fullhtml=True, - requirejs=False, - output_div="altair-viz", - vega_version=VEGA_VERSION, - vegaembed_version=VEGAEMBED_VERSION, -) - - -kaggle_renderer = HTMLRenderer( - mode="vega", - fullhtml=False, - requirejs=True, - vega_version=VEGA_VERSION, - vegaembed_version=VEGAEMBED_VERSION, -) - - -html_renderer = HTMLRenderer( - mode="vega", - template="universal", - vega_version=VEGA_VERSION, - vegaembed_version=VEGAEMBED_VERSION, -) - - -renderers.register("default", default_renderer) -renderers.register("html", html_renderer) -renderers.register("jupyterlab", default_renderer) -renderers.register("nteract", default_renderer) -renderers.register("colab", colab_renderer) -renderers.register("kaggle", kaggle_renderer) -renderers.register("json", json_renderer) -renderers.enable("default") - - -class Vega(Displayable): - """An IPython/Jupyter display class for rendering Vega 5.""" - - renderers = renderers - schema_path = (__name__, "schema/vega-schema.json") - - -def vega(spec, validate=True): - """Render and optionally validate a Vega 5 spec. - - This will use the currently enabled renderer to render the spec. - - Parameters - ========== - spec: dict - A fully compliant Vega 5 spec, with the data portion fully processed. - validate: bool - Should the spec be validated against the Vega 5 schema? 
- """ - from IPython.display import display - - display(Vega(spec, validate=validate)) diff --git a/spaces/asiffarhankhan/custom-gpt-voice-assistant/README.md b/spaces/asiffarhankhan/custom-gpt-voice-assistant/README.md deleted file mode 100644 index db5ab3d058342fc1b6052a7dc17ac66503e5b567..0000000000000000000000000000000000000000 --- a/spaces/asiffarhankhan/custom-gpt-voice-assistant/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Custom Gpt Voice Assistant -emoji: 🌖 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/asimokby/cv-parser-huggingface/Models.py b/spaces/asimokby/cv-parser-huggingface/Models.py deleted file mode 100644 index 87af3f9c35857799e7967d9e0919d0ac4d0eebf8..0000000000000000000000000000000000000000 --- a/spaces/asimokby/cv-parser-huggingface/Models.py +++ /dev/null @@ -1,58 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoModelForSequenceClassification -from transformers import pipeline -from flair.data import Sentence -from flair.models import SequenceTagger -import pickle - - - -class Models: - - def pickle_it(self, obj, file_name): - with open(f'{file_name}.pickle', 'wb') as f: - pickle.dump(obj, f) - - def unpickle_it(self, file_name): - with open(f'{file_name}.pickle', 'rb') as f: - return pickle.load(f) - - def load_trained_models(self, pickle=False): - #NER (dates) - tokenizer = AutoTokenizer.from_pretrained("Jean-Baptiste/camembert-ner-with-dates") - model = AutoModelForTokenClassification.from_pretrained("Jean-Baptiste/camembert-ner-with-dates") - self.ner_dates = pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy="simple") - - #Zero Shot Classification - # self.zero_shot_classifier = pipeline("zero-shot-classification", model='facebook/bart-large-mnli') - self.zero_shot_classifier = pipeline("zero-shot-classification", model='valhalla/distilbart-mnli-12-6') - - # Ner - tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER") - model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER") - self.ner = pipeline('ner', model=model, tokenizer=tokenizer, grouped_entities=True) - - # Pos Tagging - self.tagger = SequenceTagger.load("flair/pos-english-fast") - - - if pickle: - self.pickle_models() - - return self.ner, self.ner_dates, self.zero_shot_classifier, self.tagger - - def pickle_models(self): - self.pickle_it(self.ner, "ner") - self.pickle_it(self.zero_shot_classifier, "zero_shot_classifier_6") - self.pickle_it(self.ner_dates, "ner_dates") - self.pickle_it(self.tagger, "pos_tagger_fast") - - - def load_pickled_models(self): - ner_dates = self.unpickle_it('ner_dates') - ner = self.unpickle_it('ner') - zero_shot_classifier = self.unpickle_it('zero_shot_classifier_6') - tagger = self.unpickle_it("pos_tagger_fast") - return ner_dates, ner, zero_shot_classifier, tagger - - def get_flair_sentence(self, sent): - return Sentence(sent) \ No newline at end of file diff --git a/spaces/atimughal662/InfoFusion/src/evaluate_params.py b/spaces/atimughal662/InfoFusion/src/evaluate_params.py deleted file mode 100644 index 6f69d3f1bb648ab07c63a286326e37edf483f9db..0000000000000000000000000000000000000000 --- a/spaces/atimughal662/InfoFusion/src/evaluate_params.py +++ /dev/null @@ -1,71 +0,0 @@ -input_args_list = ['model_state', 'my_db_state', 'selection_docs_state', 'requests_state'] - 
-no_default_param_names = [ - 'instruction', - 'iinput', - 'context', - 'instruction_nochat', - 'iinput_nochat', -] - -gen_hyper0 = ['num_beams', - 'max_new_tokens', - 'min_new_tokens', - 'early_stopping', - 'max_time', - 'repetition_penalty', - 'num_return_sequences', - 'do_sample', - ] -gen_hyper = ['temperature', - 'top_p', - 'top_k'] + gen_hyper0 -reader_names = ['image_loaders', 'pdf_loaders', 'url_loaders', 'jq_schema'] - -eval_func_param_names = ['instruction', - 'iinput', - 'context', - 'stream_output', - 'prompt_type', - 'prompt_dict'] + \ - gen_hyper + \ - ['chat', - 'instruction_nochat', - 'iinput_nochat', - 'langchain_mode', - 'add_chat_history_to_context', - 'langchain_action', - 'langchain_agents', - 'top_k_docs', - 'chunk', - 'chunk_size', - 'document_subset', - 'document_choice', - 'pre_prompt_query', - 'prompt_query', - 'pre_prompt_summary', - 'prompt_summary', - 'system_prompt', - ] + \ - reader_names + \ - ['visible_models', - 'h2ogpt_key', - 'add_search_to_context', - 'chat_conversation', - 'text_context_list', - 'docs_ordering_type', - 'min_max_new_tokens', - ] - -# form evaluate defaults for submit_nochat_api -eval_func_param_names_defaults = eval_func_param_names.copy() -for k in no_default_param_names: - if k in eval_func_param_names_defaults: - eval_func_param_names_defaults.remove(k) - -eval_extra_columns = ['prompt', 'response', 'score'] - -# override default_kwargs if user_kwargs None for args evaluate() uses that are not just in model_state -# ensure prompt_type consistent with prep_bot(), so nochat API works same way -# see how default_kwargs is set in gradio_runner.py -key_overrides = ['prompt_type', 'prompt_dict'] diff --git a/spaces/awacke1/Audio-Sentiment-harshit345-xlsr-wav2vec-speech-emotion-recognition/README.md b/spaces/awacke1/Audio-Sentiment-harshit345-xlsr-wav2vec-speech-emotion-recognition/README.md deleted file mode 100644 index c2dd36d172d8a9b6676d0ab7c3a88bf49375c574..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Audio-Sentiment-harshit345-xlsr-wav2vec-speech-emotion-recognition/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Audio Sentiment Harshit345 Xlsr Wav2vec Speech Emotion Recognition -emoji: 📚 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/HTML5-Tower-Building-3D-Game/index.html b/spaces/awacke1/HTML5-Tower-Building-3D-Game/index.html deleted file mode 100644 index aca82737b2d1264fc5e24c3f599052cc183b668a..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HTML5-Tower-Building-3D-Game/index.html +++ /dev/null @@ -1,194 +0,0 @@ - - - - Tower Building Game - - - - - - - -
    - - - - \ No newline at end of file diff --git a/spaces/awacke1/StreamlitAIPP1/backup.py b/spaces/awacke1/StreamlitAIPP1/backup.py deleted file mode 100644 index d2ea9861f7c6c8cdd343ed8ea2e309962169d3d8..0000000000000000000000000000000000000000 --- a/spaces/awacke1/StreamlitAIPP1/backup.py +++ /dev/null @@ -1,22 +0,0 @@ -import streamlit as st -import time - -def main(): - st.title("Simple Streamlit Program") - - # Wait for 5 seconds - with st.spinner("Waiting for 5 seconds..."): - time.sleep(5) - st.success("Completed!") - - # File Upload - st.header("File Upload") - uploaded_file = st.file_uploader("Upload a file") - - if uploaded_file is not None: - file_contents = uploaded_file.read() - st.markdown("### File Contents") - st.markdown(f"```{file_contents.decode('utf-8')}```") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/src/helpers/PositionalAudioHelper.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/helpers/PositionalAudioHelper.d.ts deleted file mode 100644 index 3bccf84c09e38935c55d0479f7b860d806de2b2c..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/helpers/PositionalAudioHelper.d.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { PositionalAudio } from './../audio/PositionalAudio'; -import { Line } from './../objects/Line'; - -export class PositionalAudioHelper extends Line { - constructor(audio: PositionalAudio, range?: number, divisionsInnerAngle?: number, divisionsOuterAngle?: number); - - audio: PositionalAudio; - range: number; - divisionsInnerAngle: number; - divisionsOuterAngle: number; - - dispose(): void; - update(): void; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk.js deleted file mode 100644 index 2adf87807dac15751e2c3e87e905bc4b27f1c634..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk.js +++ /dev/null @@ -1,243 +0,0 @@ -import alphamap_fragment from './ShaderChunk/alphamap_fragment.glsl.js'; -import alphamap_pars_fragment from './ShaderChunk/alphamap_pars_fragment.glsl.js'; -import alphatest_fragment from './ShaderChunk/alphatest_fragment.glsl.js'; -import aomap_fragment from './ShaderChunk/aomap_fragment.glsl.js'; -import aomap_pars_fragment from './ShaderChunk/aomap_pars_fragment.glsl.js'; -import begin_vertex from './ShaderChunk/begin_vertex.glsl.js'; -import beginnormal_vertex from './ShaderChunk/beginnormal_vertex.glsl.js'; -import bsdfs from './ShaderChunk/bsdfs.glsl.js'; -import bumpmap_pars_fragment from './ShaderChunk/bumpmap_pars_fragment.glsl.js'; -import clipping_planes_fragment from './ShaderChunk/clipping_planes_fragment.glsl.js'; -import clipping_planes_pars_fragment from './ShaderChunk/clipping_planes_pars_fragment.glsl.js'; -import clipping_planes_pars_vertex from './ShaderChunk/clipping_planes_pars_vertex.glsl.js'; -import clipping_planes_vertex from './ShaderChunk/clipping_planes_vertex.glsl.js'; -import color_fragment from './ShaderChunk/color_fragment.glsl.js'; -import color_pars_fragment from './ShaderChunk/color_pars_fragment.glsl.js'; -import color_pars_vertex from './ShaderChunk/color_pars_vertex.glsl.js'; -import color_vertex from './ShaderChunk/color_vertex.glsl.js'; -import common from './ShaderChunk/common.glsl.js'; -import cube_uv_reflection_fragment from 
'./ShaderChunk/cube_uv_reflection_fragment.glsl.js'; -import defaultnormal_vertex from './ShaderChunk/defaultnormal_vertex.glsl.js'; -import displacementmap_pars_vertex from './ShaderChunk/displacementmap_pars_vertex.glsl.js'; -import displacementmap_vertex from './ShaderChunk/displacementmap_vertex.glsl.js'; -import emissivemap_fragment from './ShaderChunk/emissivemap_fragment.glsl.js'; -import emissivemap_pars_fragment from './ShaderChunk/emissivemap_pars_fragment.glsl.js'; -import encodings_fragment from './ShaderChunk/encodings_fragment.glsl.js'; -import encodings_pars_fragment from './ShaderChunk/encodings_pars_fragment.glsl.js'; -import envmap_fragment from './ShaderChunk/envmap_fragment.glsl.js'; -import envmap_pars_fragment from './ShaderChunk/envmap_pars_fragment.glsl.js'; -import envmap_pars_vertex from './ShaderChunk/envmap_pars_vertex.glsl.js'; -import envmap_vertex from './ShaderChunk/envmap_vertex.glsl.js'; -import fog_vertex from './ShaderChunk/fog_vertex.glsl.js'; -import fog_pars_vertex from './ShaderChunk/fog_pars_vertex.glsl.js'; -import fog_fragment from './ShaderChunk/fog_fragment.glsl.js'; -import fog_pars_fragment from './ShaderChunk/fog_pars_fragment.glsl.js'; -import gradientmap_pars_fragment from './ShaderChunk/gradientmap_pars_fragment.glsl.js'; -import lightmap_fragment from './ShaderChunk/lightmap_fragment.glsl.js'; -import lightmap_pars_fragment from './ShaderChunk/lightmap_pars_fragment.glsl.js'; -import lights_lambert_vertex from './ShaderChunk/lights_lambert_vertex.glsl.js'; -import lights_pars_begin from './ShaderChunk/lights_pars_begin.glsl.js'; -import envmap_physical_pars_fragment from './ShaderChunk/envmap_physical_pars_fragment.glsl.js'; -import lights_phong_fragment from './ShaderChunk/lights_phong_fragment.glsl.js'; -import lights_phong_pars_fragment from './ShaderChunk/lights_phong_pars_fragment.glsl.js'; -import lights_physical_fragment from './ShaderChunk/lights_physical_fragment.glsl.js'; -import lights_physical_pars_fragment from './ShaderChunk/lights_physical_pars_fragment.glsl.js'; -import lights_fragment_begin from './ShaderChunk/lights_fragment_begin.glsl.js'; -import lights_fragment_maps from './ShaderChunk/lights_fragment_maps.glsl.js'; -import lights_fragment_end from './ShaderChunk/lights_fragment_end.glsl.js'; -import logdepthbuf_fragment from './ShaderChunk/logdepthbuf_fragment.glsl.js'; -import logdepthbuf_pars_fragment from './ShaderChunk/logdepthbuf_pars_fragment.glsl.js'; -import logdepthbuf_pars_vertex from './ShaderChunk/logdepthbuf_pars_vertex.glsl.js'; -import logdepthbuf_vertex from './ShaderChunk/logdepthbuf_vertex.glsl.js'; -import map_fragment from './ShaderChunk/map_fragment.glsl.js'; -import map_pars_fragment from './ShaderChunk/map_pars_fragment.glsl.js'; -import map_particle_fragment from './ShaderChunk/map_particle_fragment.glsl.js'; -import map_particle_pars_fragment from './ShaderChunk/map_particle_pars_fragment.glsl.js'; -import metalnessmap_fragment from './ShaderChunk/metalnessmap_fragment.glsl.js'; -import metalnessmap_pars_fragment from './ShaderChunk/metalnessmap_pars_fragment.glsl.js'; -import morphnormal_vertex from './ShaderChunk/morphnormal_vertex.glsl.js'; -import morphtarget_pars_vertex from './ShaderChunk/morphtarget_pars_vertex.glsl.js'; -import morphtarget_vertex from './ShaderChunk/morphtarget_vertex.glsl.js'; -import normal_fragment_begin from './ShaderChunk/normal_fragment_begin.glsl.js'; -import normal_fragment_maps from './ShaderChunk/normal_fragment_maps.glsl.js'; -import normalmap_pars_fragment 
from './ShaderChunk/normalmap_pars_fragment.glsl.js'; -import packing from './ShaderChunk/packing.glsl.js'; -import premultiplied_alpha_fragment from './ShaderChunk/premultiplied_alpha_fragment.glsl.js'; -import project_vertex from './ShaderChunk/project_vertex.glsl.js'; -import dithering_fragment from './ShaderChunk/dithering_fragment.glsl.js'; -import dithering_pars_fragment from './ShaderChunk/dithering_pars_fragment.glsl.js'; -import roughnessmap_fragment from './ShaderChunk/roughnessmap_fragment.glsl.js'; -import roughnessmap_pars_fragment from './ShaderChunk/roughnessmap_pars_fragment.glsl.js'; -import shadowmap_pars_fragment from './ShaderChunk/shadowmap_pars_fragment.glsl.js'; -import shadowmap_pars_vertex from './ShaderChunk/shadowmap_pars_vertex.glsl.js'; -import shadowmap_vertex from './ShaderChunk/shadowmap_vertex.glsl.js'; -import shadowmask_pars_fragment from './ShaderChunk/shadowmask_pars_fragment.glsl.js'; -import skinbase_vertex from './ShaderChunk/skinbase_vertex.glsl.js'; -import skinning_pars_vertex from './ShaderChunk/skinning_pars_vertex.glsl.js'; -import skinning_vertex from './ShaderChunk/skinning_vertex.glsl.js'; -import skinnormal_vertex from './ShaderChunk/skinnormal_vertex.glsl.js'; -import specularmap_fragment from './ShaderChunk/specularmap_fragment.glsl.js'; -import specularmap_pars_fragment from './ShaderChunk/specularmap_pars_fragment.glsl.js'; -import tonemapping_fragment from './ShaderChunk/tonemapping_fragment.glsl.js'; -import tonemapping_pars_fragment from './ShaderChunk/tonemapping_pars_fragment.glsl.js'; -import uv_pars_fragment from './ShaderChunk/uv_pars_fragment.glsl.js'; -import uv_pars_vertex from './ShaderChunk/uv_pars_vertex.glsl.js'; -import uv_vertex from './ShaderChunk/uv_vertex.glsl.js'; -import uv2_pars_fragment from './ShaderChunk/uv2_pars_fragment.glsl.js'; -import uv2_pars_vertex from './ShaderChunk/uv2_pars_vertex.glsl.js'; -import uv2_vertex from './ShaderChunk/uv2_vertex.glsl.js'; -import worldpos_vertex from './ShaderChunk/worldpos_vertex.glsl.js'; - -import background_frag from './ShaderLib/background_frag.glsl.js'; -import background_vert from './ShaderLib/background_vert.glsl.js'; -import cube_frag from './ShaderLib/cube_frag.glsl.js'; -import cube_vert from './ShaderLib/cube_vert.glsl.js'; -import depth_frag from './ShaderLib/depth_frag.glsl.js'; -import depth_vert from './ShaderLib/depth_vert.glsl.js'; -import distanceRGBA_frag from './ShaderLib/distanceRGBA_frag.glsl.js'; -import distanceRGBA_vert from './ShaderLib/distanceRGBA_vert.glsl.js'; -import equirect_frag from './ShaderLib/equirect_frag.glsl.js'; -import equirect_vert from './ShaderLib/equirect_vert.glsl.js'; -import linedashed_frag from './ShaderLib/linedashed_frag.glsl.js'; -import linedashed_vert from './ShaderLib/linedashed_vert.glsl.js'; -import meshbasic_frag from './ShaderLib/meshbasic_frag.glsl.js'; -import meshbasic_vert from './ShaderLib/meshbasic_vert.glsl.js'; -import meshlambert_frag from './ShaderLib/meshlambert_frag.glsl.js'; -import meshlambert_vert from './ShaderLib/meshlambert_vert.glsl.js'; -import meshmatcap_frag from './ShaderLib/meshmatcap_frag.glsl.js'; -import meshmatcap_vert from './ShaderLib/meshmatcap_vert.glsl.js'; -import meshphong_frag from './ShaderLib/meshphong_frag.glsl.js'; -import meshphong_vert from './ShaderLib/meshphong_vert.glsl.js'; -import meshphysical_frag from './ShaderLib/meshphysical_frag.glsl.js'; -import meshphysical_vert from './ShaderLib/meshphysical_vert.glsl.js'; -import normal_frag from 
'./ShaderLib/normal_frag.glsl.js'; -import normal_vert from './ShaderLib/normal_vert.glsl.js'; -import points_frag from './ShaderLib/points_frag.glsl.js'; -import points_vert from './ShaderLib/points_vert.glsl.js'; -import shadow_frag from './ShaderLib/shadow_frag.glsl.js'; -import shadow_vert from './ShaderLib/shadow_vert.glsl.js'; -import sprite_frag from './ShaderLib/sprite_frag.glsl.js'; -import sprite_vert from './ShaderLib/sprite_vert.glsl.js'; - -export var ShaderChunk = { - alphamap_fragment: alphamap_fragment, - alphamap_pars_fragment: alphamap_pars_fragment, - alphatest_fragment: alphatest_fragment, - aomap_fragment: aomap_fragment, - aomap_pars_fragment: aomap_pars_fragment, - begin_vertex: begin_vertex, - beginnormal_vertex: beginnormal_vertex, - bsdfs: bsdfs, - bumpmap_pars_fragment: bumpmap_pars_fragment, - clipping_planes_fragment: clipping_planes_fragment, - clipping_planes_pars_fragment: clipping_planes_pars_fragment, - clipping_planes_pars_vertex: clipping_planes_pars_vertex, - clipping_planes_vertex: clipping_planes_vertex, - color_fragment: color_fragment, - color_pars_fragment: color_pars_fragment, - color_pars_vertex: color_pars_vertex, - color_vertex: color_vertex, - common: common, - cube_uv_reflection_fragment: cube_uv_reflection_fragment, - defaultnormal_vertex: defaultnormal_vertex, - displacementmap_pars_vertex: displacementmap_pars_vertex, - displacementmap_vertex: displacementmap_vertex, - emissivemap_fragment: emissivemap_fragment, - emissivemap_pars_fragment: emissivemap_pars_fragment, - encodings_fragment: encodings_fragment, - encodings_pars_fragment: encodings_pars_fragment, - envmap_fragment: envmap_fragment, - envmap_pars_fragment: envmap_pars_fragment, - envmap_pars_vertex: envmap_pars_vertex, - envmap_physical_pars_fragment: envmap_physical_pars_fragment, - envmap_vertex: envmap_vertex, - fog_vertex: fog_vertex, - fog_pars_vertex: fog_pars_vertex, - fog_fragment: fog_fragment, - fog_pars_fragment: fog_pars_fragment, - gradientmap_pars_fragment: gradientmap_pars_fragment, - lightmap_fragment: lightmap_fragment, - lightmap_pars_fragment: lightmap_pars_fragment, - lights_lambert_vertex: lights_lambert_vertex, - lights_pars_begin: lights_pars_begin, - lights_phong_fragment: lights_phong_fragment, - lights_phong_pars_fragment: lights_phong_pars_fragment, - lights_physical_fragment: lights_physical_fragment, - lights_physical_pars_fragment: lights_physical_pars_fragment, - lights_fragment_begin: lights_fragment_begin, - lights_fragment_maps: lights_fragment_maps, - lights_fragment_end: lights_fragment_end, - logdepthbuf_fragment: logdepthbuf_fragment, - logdepthbuf_pars_fragment: logdepthbuf_pars_fragment, - logdepthbuf_pars_vertex: logdepthbuf_pars_vertex, - logdepthbuf_vertex: logdepthbuf_vertex, - map_fragment: map_fragment, - map_pars_fragment: map_pars_fragment, - map_particle_fragment: map_particle_fragment, - map_particle_pars_fragment: map_particle_pars_fragment, - metalnessmap_fragment: metalnessmap_fragment, - metalnessmap_pars_fragment: metalnessmap_pars_fragment, - morphnormal_vertex: morphnormal_vertex, - morphtarget_pars_vertex: morphtarget_pars_vertex, - morphtarget_vertex: morphtarget_vertex, - normal_fragment_begin: normal_fragment_begin, - normal_fragment_maps: normal_fragment_maps, - normalmap_pars_fragment: normalmap_pars_fragment, - packing: packing, - premultiplied_alpha_fragment: premultiplied_alpha_fragment, - project_vertex: project_vertex, - dithering_fragment: dithering_fragment, - dithering_pars_fragment: dithering_pars_fragment, 
- roughnessmap_fragment: roughnessmap_fragment, - roughnessmap_pars_fragment: roughnessmap_pars_fragment, - shadowmap_pars_fragment: shadowmap_pars_fragment, - shadowmap_pars_vertex: shadowmap_pars_vertex, - shadowmap_vertex: shadowmap_vertex, - shadowmask_pars_fragment: shadowmask_pars_fragment, - skinbase_vertex: skinbase_vertex, - skinning_pars_vertex: skinning_pars_vertex, - skinning_vertex: skinning_vertex, - skinnormal_vertex: skinnormal_vertex, - specularmap_fragment: specularmap_fragment, - specularmap_pars_fragment: specularmap_pars_fragment, - tonemapping_fragment: tonemapping_fragment, - tonemapping_pars_fragment: tonemapping_pars_fragment, - uv_pars_fragment: uv_pars_fragment, - uv_pars_vertex: uv_pars_vertex, - uv_vertex: uv_vertex, - uv2_pars_fragment: uv2_pars_fragment, - uv2_pars_vertex: uv2_pars_vertex, - uv2_vertex: uv2_vertex, - worldpos_vertex: worldpos_vertex, - - background_frag: background_frag, - background_vert: background_vert, - cube_frag: cube_frag, - cube_vert: cube_vert, - depth_frag: depth_frag, - depth_vert: depth_vert, - distanceRGBA_frag: distanceRGBA_frag, - distanceRGBA_vert: distanceRGBA_vert, - equirect_frag: equirect_frag, - equirect_vert: equirect_vert, - linedashed_frag: linedashed_frag, - linedashed_vert: linedashed_vert, - meshbasic_frag: meshbasic_frag, - meshbasic_vert: meshbasic_vert, - meshlambert_frag: meshlambert_frag, - meshlambert_vert: meshlambert_vert, - meshmatcap_frag: meshmatcap_frag, - meshmatcap_vert: meshmatcap_vert, - meshphong_frag: meshphong_frag, - meshphong_vert: meshphong_vert, - meshphysical_frag: meshphysical_frag, - meshphysical_vert: meshphysical_vert, - normal_frag: normal_frag, - normal_vert: normal_vert, - points_frag: points_frag, - points_vert: points_vert, - shadow_frag: shadow_frag, - shadow_vert: shadow_vert, - sprite_frag: sprite_frag, - sprite_vert: sprite_vert -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLProperties.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLProperties.js deleted file mode 100644 index e875912bfe22549e63f529ed6061fb723212d6f6..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLProperties.js +++ /dev/null @@ -1,52 +0,0 @@ -/** - * @author fordacious / fordacious.github.io - */ - -function WebGLProperties() { - - var properties = new WeakMap(); - - function get( object ) { - - var map = properties.get( object ); - - if ( map === undefined ) { - - map = {}; - properties.set( object, map ); - - } - - return map; - - } - - function remove( object ) { - - properties.delete( object ); - - } - - function update( object, key, value ) { - - properties.get( object )[ key ] = value; - - } - - function dispose() { - - properties = new WeakMap(); - - } - - return { - get: get, - remove: remove, - update: update, - dispose: dispose - }; - -} - - -export { WebGLProperties }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLTextures.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLTextures.js deleted file mode 100644 index fdfb2e5d37ed41dd8b88910289a386fa237abf51..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLTextures.js +++ /dev/null @@ -1,1181 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - */ - -import { LinearFilter, NearestFilter, RGBFormat, RGBAFormat, DepthFormat, DepthStencilFormat, UnsignedShortType, 
UnsignedIntType, UnsignedInt248Type, FloatType, HalfFloatType, ClampToEdgeWrapping, NearestMipMapLinearFilter, NearestMipMapNearestFilter } from '../../constants.js'; -import { _Math } from '../../math/Math.js'; - -function WebGLTextures( _gl, extensions, state, properties, capabilities, utils, info ) { - - var _videoTextures = {}; - var _canvas; - - // - - var useOffscreenCanvas = typeof OffscreenCanvas !== 'undefined'; - - function createCanvas( width, height ) { - - // Use OffscreenCanvas when available. Specially needed in web workers - - return useOffscreenCanvas ? - new OffscreenCanvas( width, height ) : - document.createElementNS( 'http://www.w3.org/1999/xhtml', 'canvas' ); - - } - - function resizeImage( image, needsPowerOfTwo, needsNewCanvas, maxSize ) { - - var scale = 1; - - // handle case if texture exceeds max size - - if ( image.width > maxSize || image.height > maxSize ) { - - scale = maxSize / Math.max( image.width, image.height ); - - } - - // only perform resize if necessary - - if ( scale < 1 || needsPowerOfTwo === true ) { - - // only perform resize for certain image types - - if ( ( typeof HTMLImageElement !== 'undefined' && image instanceof HTMLImageElement ) || - ( typeof HTMLCanvasElement !== 'undefined' && image instanceof HTMLCanvasElement ) || - ( typeof ImageBitmap !== 'undefined' && image instanceof ImageBitmap ) ) { - - var floor = needsPowerOfTwo ? _Math.floorPowerOfTwo : Math.floor; - - var width = floor( scale * image.width ); - var height = floor( scale * image.height ); - - if ( _canvas === undefined ) _canvas = createCanvas( width, height ); - - // cube textures can't reuse the same canvas - - var canvas = needsNewCanvas ? createCanvas( width, height ) : _canvas; - - canvas.width = width; - canvas.height = height; - - var context = canvas.getContext( '2d' ); - context.drawImage( image, 0, 0, width, height ); - - console.warn( 'THREE.WebGLRenderer: Texture has been resized from (' + image.width + 'x' + image.height + ') to (' + width + 'x' + height + ').' ); - - return canvas; - - } else { - - if ( 'data' in image ) { - - console.warn( 'THREE.WebGLRenderer: Image in DataTexture is too big (' + image.width + 'x' + image.height + ').' ); - - } - - return image; - - } - - } - - return image; - - } - - function isPowerOfTwo( image ) { - - return _Math.isPowerOfTwo( image.width ) && _Math.isPowerOfTwo( image.height ); - - } - - function textureNeedsPowerOfTwo( texture ) { - - if ( capabilities.isWebGL2 ) return false; - - return ( texture.wrapS !== ClampToEdgeWrapping || texture.wrapT !== ClampToEdgeWrapping ) || - ( texture.minFilter !== NearestFilter && texture.minFilter !== LinearFilter ); - - } - - function textureNeedsGenerateMipmaps( texture, supportsMips ) { - - return texture.generateMipmaps && supportsMips && - texture.minFilter !== NearestFilter && texture.minFilter !== LinearFilter; - - } - - function generateMipmap( target, texture, width, height ) { - - _gl.generateMipmap( target ); - - var textureProperties = properties.get( texture ); - - // Note: Math.log( x ) * Math.LOG2E used instead of Math.log2( x ) which is not supported by IE11 - textureProperties.__maxMipLevel = Math.log( Math.max( width, height ) ) * Math.LOG2E; - - } - - function getInternalFormat( glFormat, glType ) { - - if ( ! 
capabilities.isWebGL2 ) return glFormat; - - var internalFormat = glFormat; - - if ( glFormat === _gl.RED ) { - - if ( glType === _gl.FLOAT ) internalFormat = _gl.R32F; - if ( glType === _gl.HALF_FLOAT ) internalFormat = _gl.R16F; - if ( glType === _gl.UNSIGNED_BYTE ) internalFormat = _gl.R8; - - } - - if ( glFormat === _gl.RGB ) { - - if ( glType === _gl.FLOAT ) internalFormat = _gl.RGB32F; - if ( glType === _gl.HALF_FLOAT ) internalFormat = _gl.RGB16F; - if ( glType === _gl.UNSIGNED_BYTE ) internalFormat = _gl.RGB8; - - } - - if ( glFormat === _gl.RGBA ) { - - if ( glType === _gl.FLOAT ) internalFormat = _gl.RGBA32F; - if ( glType === _gl.HALF_FLOAT ) internalFormat = _gl.RGBA16F; - if ( glType === _gl.UNSIGNED_BYTE ) internalFormat = _gl.RGBA8; - - } - - if ( internalFormat === _gl.R16F || internalFormat === _gl.R32F || - internalFormat === _gl.RGBA16F || internalFormat === _gl.RGBA32F ) { - - extensions.get( 'EXT_color_buffer_float' ); - - } else if ( internalFormat === _gl.RGB16F || internalFormat === _gl.RGB32F ) { - - console.warn( 'THREE.WebGLRenderer: Floating point textures with RGB format not supported. Please use RGBA instead.' ); - - } - - return internalFormat; - - } - - // Fallback filters for non-power-of-2 textures - - function filterFallback( f ) { - - if ( f === NearestFilter || f === NearestMipMapNearestFilter || f === NearestMipMapLinearFilter ) { - - return _gl.NEAREST; - - } - - return _gl.LINEAR; - - } - - // - - function onTextureDispose( event ) { - - var texture = event.target; - - texture.removeEventListener( 'dispose', onTextureDispose ); - - deallocateTexture( texture ); - - if ( texture.isVideoTexture ) { - - delete _videoTextures[ texture.id ]; - - } - - info.memory.textures --; - - } - - function onRenderTargetDispose( event ) { - - var renderTarget = event.target; - - renderTarget.removeEventListener( 'dispose', onRenderTargetDispose ); - - deallocateRenderTarget( renderTarget ); - - info.memory.textures --; - - } - - // - - function deallocateTexture( texture ) { - - var textureProperties = properties.get( texture ); - - if ( textureProperties.__webglInit === undefined ) return; - - _gl.deleteTexture( textureProperties.__webglTexture ); - - properties.remove( texture ); - - } - - function deallocateRenderTarget( renderTarget ) { - - var renderTargetProperties = properties.get( renderTarget ); - var textureProperties = properties.get( renderTarget.texture ); - - if ( ! 
renderTarget ) return; - - if ( textureProperties.__webglTexture !== undefined ) { - - _gl.deleteTexture( textureProperties.__webglTexture ); - - } - - if ( renderTarget.depthTexture ) { - - renderTarget.depthTexture.dispose(); - - } - - if ( renderTarget.isWebGLRenderTargetCube ) { - - for ( var i = 0; i < 6; i ++ ) { - - _gl.deleteFramebuffer( renderTargetProperties.__webglFramebuffer[ i ] ); - if ( renderTargetProperties.__webglDepthbuffer ) _gl.deleteRenderbuffer( renderTargetProperties.__webglDepthbuffer[ i ] ); - - } - - } else { - - _gl.deleteFramebuffer( renderTargetProperties.__webglFramebuffer ); - if ( renderTargetProperties.__webglDepthbuffer ) _gl.deleteRenderbuffer( renderTargetProperties.__webglDepthbuffer ); - - } - - properties.remove( renderTarget.texture ); - properties.remove( renderTarget ); - - } - - // - - var textureUnits = 0; - - function resetTextureUnits() { - - textureUnits = 0; - - } - - function allocateTextureUnit() { - - var textureUnit = textureUnits; - - if ( textureUnit >= capabilities.maxTextures ) { - - console.warn( 'THREE.WebGLTextures: Trying to use ' + textureUnit + ' texture units while this GPU supports only ' + capabilities.maxTextures ); - - } - - textureUnits += 1; - - return textureUnit; - - } - - // - - function setTexture2D( texture, slot ) { - - var textureProperties = properties.get( texture ); - - if ( texture.isVideoTexture ) updateVideoTexture( texture ); - - if ( texture.version > 0 && textureProperties.__version !== texture.version ) { - - var image = texture.image; - - if ( image === undefined ) { - - console.warn( 'THREE.WebGLRenderer: Texture marked for update but image is undefined' ); - - } else if ( image.complete === false ) { - - console.warn( 'THREE.WebGLRenderer: Texture marked for update but image is incomplete' ); - - } else { - - uploadTexture( textureProperties, texture, slot ); - return; - - } - - } - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( _gl.TEXTURE_2D, textureProperties.__webglTexture ); - - } - - function setTexture2DArray( texture, slot ) { - - var textureProperties = properties.get( texture ); - - if ( texture.version > 0 && textureProperties.__version !== texture.version ) { - - uploadTexture( textureProperties, texture, slot ); - return; - - } - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( _gl.TEXTURE_2D_ARRAY, textureProperties.__webglTexture ); - - } - - function setTexture3D( texture, slot ) { - - var textureProperties = properties.get( texture ); - - if ( texture.version > 0 && textureProperties.__version !== texture.version ) { - - uploadTexture( textureProperties, texture, slot ); - return; - - } - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( _gl.TEXTURE_3D, textureProperties.__webglTexture ); - - } - - function setTextureCube( texture, slot ) { - - var textureProperties = properties.get( texture ); - - if ( texture.image.length === 6 ) { - - if ( texture.version > 0 && textureProperties.__version !== texture.version ) { - - initTexture( textureProperties, texture ); - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( _gl.TEXTURE_CUBE_MAP, textureProperties.__webglTexture ); - - _gl.pixelStorei( _gl.UNPACK_FLIP_Y_WEBGL, texture.flipY ); - - var isCompressed = ( texture && texture.isCompressedTexture ); - var isDataTexture = ( texture.image[ 0 ] && texture.image[ 0 ].isDataTexture ); - - var cubeImage = []; - - for ( var i = 0; i < 6; i ++ ) { - - if ( ! isCompressed && ! 
isDataTexture ) { - - cubeImage[ i ] = resizeImage( texture.image[ i ], false, true, capabilities.maxCubemapSize ); - - } else { - - cubeImage[ i ] = isDataTexture ? texture.image[ i ].image : texture.image[ i ]; - - } - - } - - var image = cubeImage[ 0 ], - supportsMips = isPowerOfTwo( image ) || capabilities.isWebGL2, - glFormat = utils.convert( texture.format ), - glType = utils.convert( texture.type ), - glInternalFormat = getInternalFormat( glFormat, glType ); - - setTextureParameters( _gl.TEXTURE_CUBE_MAP, texture, supportsMips ); - - for ( var i = 0; i < 6; i ++ ) { - - if ( ! isCompressed ) { - - if ( isDataTexture ) { - - state.texImage2D( _gl.TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, glInternalFormat, cubeImage[ i ].width, cubeImage[ i ].height, 0, glFormat, glType, cubeImage[ i ].data ); - - } else { - - state.texImage2D( _gl.TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, glInternalFormat, glFormat, glType, cubeImage[ i ] ); - - } - - } else { - - var mipmap, mipmaps = cubeImage[ i ].mipmaps; - - for ( var j = 0, jl = mipmaps.length; j < jl; j ++ ) { - - mipmap = mipmaps[ j ]; - - if ( texture.format !== RGBAFormat && texture.format !== RGBFormat ) { - - if ( state.getCompressedTextureFormats().indexOf( glFormat ) > - 1 ) { - - state.compressedTexImage2D( _gl.TEXTURE_CUBE_MAP_POSITIVE_X + i, j, glInternalFormat, mipmap.width, mipmap.height, 0, mipmap.data ); - - } else { - - console.warn( 'THREE.WebGLRenderer: Attempt to load unsupported compressed texture format in .setTextureCube()' ); - - } - - } else { - - state.texImage2D( _gl.TEXTURE_CUBE_MAP_POSITIVE_X + i, j, glInternalFormat, mipmap.width, mipmap.height, 0, glFormat, glType, mipmap.data ); - - } - - } - - } - - } - - if ( ! isCompressed ) { - - textureProperties.__maxMipLevel = 0; - - } else { - - textureProperties.__maxMipLevel = mipmaps.length - 1; - - } - - if ( textureNeedsGenerateMipmaps( texture, supportsMips ) ) { - - // We assume images for cube map have the same size. 
- generateMipmap( _gl.TEXTURE_CUBE_MAP, texture, image.width, image.height ); - - } - - textureProperties.__version = texture.version; - - if ( texture.onUpdate ) texture.onUpdate( texture ); - - } else { - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( _gl.TEXTURE_CUBE_MAP, textureProperties.__webglTexture ); - - } - - } - - } - - function setTextureCubeDynamic( texture, slot ) { - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( _gl.TEXTURE_CUBE_MAP, properties.get( texture ).__webglTexture ); - - } - - function setTextureParameters( textureType, texture, supportsMips ) { - - var extension; - - if ( supportsMips ) { - - _gl.texParameteri( textureType, _gl.TEXTURE_WRAP_S, utils.convert( texture.wrapS ) ); - _gl.texParameteri( textureType, _gl.TEXTURE_WRAP_T, utils.convert( texture.wrapT ) ); - - if ( textureType === _gl.TEXTURE_3D || textureType === _gl.TEXTURE_2D_ARRAY ) { - - _gl.texParameteri( textureType, _gl.TEXTURE_WRAP_R, utils.convert( texture.wrapR ) ); - - } - - _gl.texParameteri( textureType, _gl.TEXTURE_MAG_FILTER, utils.convert( texture.magFilter ) ); - _gl.texParameteri( textureType, _gl.TEXTURE_MIN_FILTER, utils.convert( texture.minFilter ) ); - - } else { - - _gl.texParameteri( textureType, _gl.TEXTURE_WRAP_S, _gl.CLAMP_TO_EDGE ); - _gl.texParameteri( textureType, _gl.TEXTURE_WRAP_T, _gl.CLAMP_TO_EDGE ); - - if ( textureType === _gl.TEXTURE_3D || textureType === _gl.TEXTURE_2D_ARRAY ) { - - _gl.texParameteri( textureType, _gl.TEXTURE_WRAP_R, _gl.CLAMP_TO_EDGE ); - - } - - if ( texture.wrapS !== ClampToEdgeWrapping || texture.wrapT !== ClampToEdgeWrapping ) { - - console.warn( 'THREE.WebGLRenderer: Texture is not power of two. Texture.wrapS and Texture.wrapT should be set to THREE.ClampToEdgeWrapping.' ); - - } - - _gl.texParameteri( textureType, _gl.TEXTURE_MAG_FILTER, filterFallback( texture.magFilter ) ); - _gl.texParameteri( textureType, _gl.TEXTURE_MIN_FILTER, filterFallback( texture.minFilter ) ); - - if ( texture.minFilter !== NearestFilter && texture.minFilter !== LinearFilter ) { - - console.warn( 'THREE.WebGLRenderer: Texture is not power of two. Texture.minFilter should be set to THREE.NearestFilter or THREE.LinearFilter.' 
); - - } - - } - - extension = extensions.get( 'EXT_texture_filter_anisotropic' ); - - if ( extension ) { - - if ( texture.type === FloatType && extensions.get( 'OES_texture_float_linear' ) === null ) return; - if ( texture.type === HalfFloatType && ( capabilities.isWebGL2 || extensions.get( 'OES_texture_half_float_linear' ) ) === null ) return; - - if ( texture.anisotropy > 1 || properties.get( texture ).__currentAnisotropy ) { - - _gl.texParameterf( textureType, extension.TEXTURE_MAX_ANISOTROPY_EXT, Math.min( texture.anisotropy, capabilities.getMaxAnisotropy() ) ); - properties.get( texture ).__currentAnisotropy = texture.anisotropy; - - } - - } - - } - - function initTexture( textureProperties, texture ) { - - if ( textureProperties.__webglInit === undefined ) { - - textureProperties.__webglInit = true; - - texture.addEventListener( 'dispose', onTextureDispose ); - - textureProperties.__webglTexture = _gl.createTexture(); - - info.memory.textures ++; - - } - - } - - function uploadTexture( textureProperties, texture, slot ) { - - var textureType = _gl.TEXTURE_2D; - - if ( texture.isDataTexture2DArray ) textureType = _gl.TEXTURE_2D_ARRAY; - if ( texture.isDataTexture3D ) textureType = _gl.TEXTURE_3D; - - initTexture( textureProperties, texture ); - - state.activeTexture( _gl.TEXTURE0 + slot ); - state.bindTexture( textureType, textureProperties.__webglTexture ); - - _gl.pixelStorei( _gl.UNPACK_FLIP_Y_WEBGL, texture.flipY ); - _gl.pixelStorei( _gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, texture.premultiplyAlpha ); - _gl.pixelStorei( _gl.UNPACK_ALIGNMENT, texture.unpackAlignment ); - - var needsPowerOfTwo = textureNeedsPowerOfTwo( texture ) && isPowerOfTwo( texture.image ) === false; - var image = resizeImage( texture.image, needsPowerOfTwo, false, capabilities.maxTextureSize ); - - var supportsMips = isPowerOfTwo( image ) || capabilities.isWebGL2, - glFormat = utils.convert( texture.format ), - glType = utils.convert( texture.type ), - glInternalFormat = getInternalFormat( glFormat, glType ); - - setTextureParameters( textureType, texture, supportsMips ); - - var mipmap, mipmaps = texture.mipmaps; - - if ( texture.isDepthTexture ) { - - // populate depth texture with dummy data - - glInternalFormat = _gl.DEPTH_COMPONENT; - - if ( texture.type === FloatType ) { - - if ( ! capabilities.isWebGL2 ) throw new Error( 'Float Depth Texture only supported in WebGL2.0' ); - glInternalFormat = _gl.DEPTH_COMPONENT32F; - - } else if ( capabilities.isWebGL2 ) { - - // WebGL 2.0 requires signed internalformat for glTexImage2D - glInternalFormat = _gl.DEPTH_COMPONENT16; - - } - - if ( texture.format === DepthFormat && glInternalFormat === _gl.DEPTH_COMPONENT ) { - - // The error INVALID_OPERATION is generated by texImage2D if format and internalformat are - // DEPTH_COMPONENT and type is not UNSIGNED_SHORT or UNSIGNED_INT - // (https://www.khronos.org/registry/webgl/extensions/WEBGL_depth_texture/) - if ( texture.type !== UnsignedShortType && texture.type !== UnsignedIntType ) { - - console.warn( 'THREE.WebGLRenderer: Use UnsignedShortType or UnsignedIntType for DepthFormat DepthTexture.' 
); - - texture.type = UnsignedShortType; - glType = utils.convert( texture.type ); - - } - - } - - // Depth stencil textures need the DEPTH_STENCIL internal format - // (https://www.khronos.org/registry/webgl/extensions/WEBGL_depth_texture/) - if ( texture.format === DepthStencilFormat ) { - - glInternalFormat = _gl.DEPTH_STENCIL; - - // The error INVALID_OPERATION is generated by texImage2D if format and internalformat are - // DEPTH_STENCIL and type is not UNSIGNED_INT_24_8_WEBGL. - // (https://www.khronos.org/registry/webgl/extensions/WEBGL_depth_texture/) - if ( texture.type !== UnsignedInt248Type ) { - - console.warn( 'THREE.WebGLRenderer: Use UnsignedInt248Type for DepthStencilFormat DepthTexture.' ); - - texture.type = UnsignedInt248Type; - glType = utils.convert( texture.type ); - - } - - } - - state.texImage2D( _gl.TEXTURE_2D, 0, glInternalFormat, image.width, image.height, 0, glFormat, glType, null ); - - } else if ( texture.isDataTexture ) { - - // use manually created mipmaps if available - // if there are no manual mipmaps - // set 0 level mipmap and then use GL to generate other mipmap levels - - if ( mipmaps.length > 0 && supportsMips ) { - - for ( var i = 0, il = mipmaps.length; i < il; i ++ ) { - - mipmap = mipmaps[ i ]; - state.texImage2D( _gl.TEXTURE_2D, i, glInternalFormat, mipmap.width, mipmap.height, 0, glFormat, glType, mipmap.data ); - - } - - texture.generateMipmaps = false; - textureProperties.__maxMipLevel = mipmaps.length - 1; - - } else { - - state.texImage2D( _gl.TEXTURE_2D, 0, glInternalFormat, image.width, image.height, 0, glFormat, glType, image.data ); - textureProperties.__maxMipLevel = 0; - - } - - } else if ( texture.isCompressedTexture ) { - - for ( var i = 0, il = mipmaps.length; i < il; i ++ ) { - - mipmap = mipmaps[ i ]; - - if ( texture.format !== RGBAFormat && texture.format !== RGBFormat ) { - - if ( state.getCompressedTextureFormats().indexOf( glFormat ) > - 1 ) { - - state.compressedTexImage2D( _gl.TEXTURE_2D, i, glInternalFormat, mipmap.width, mipmap.height, 0, mipmap.data ); - - } else { - - console.warn( 'THREE.WebGLRenderer: Attempt to load unsupported compressed texture format in .uploadTexture()' ); - - } - - } else { - - state.texImage2D( _gl.TEXTURE_2D, i, glInternalFormat, mipmap.width, mipmap.height, 0, glFormat, glType, mipmap.data ); - - } - - } - - textureProperties.__maxMipLevel = mipmaps.length - 1; - - } else if ( texture.isDataTexture2DArray ) { - - state.texImage3D( _gl.TEXTURE_2D_ARRAY, 0, glInternalFormat, image.width, image.height, image.depth, 0, glFormat, glType, image.data ); - textureProperties.__maxMipLevel = 0; - - } else if ( texture.isDataTexture3D ) { - - state.texImage3D( _gl.TEXTURE_3D, 0, glInternalFormat, image.width, image.height, image.depth, 0, glFormat, glType, image.data ); - textureProperties.__maxMipLevel = 0; - - } else { - - // regular Texture (image, video, canvas) - - // use manually created mipmaps if available - // if there are no manual mipmaps - // set 0 level mipmap and then use GL to generate other mipmap levels - - if ( mipmaps.length > 0 && supportsMips ) { - - for ( var i = 0, il = mipmaps.length; i < il; i ++ ) { - - mipmap = mipmaps[ i ]; - state.texImage2D( _gl.TEXTURE_2D, i, glInternalFormat, glFormat, glType, mipmap ); - - } - - texture.generateMipmaps = false; - textureProperties.__maxMipLevel = mipmaps.length - 1; - - } else { - - state.texImage2D( _gl.TEXTURE_2D, 0, glInternalFormat, glFormat, glType, image ); - textureProperties.__maxMipLevel = 0; - - } - - } - - if ( 
textureNeedsGenerateMipmaps( texture, supportsMips ) ) { - - generateMipmap( _gl.TEXTURE_2D, texture, image.width, image.height ); - - } - - textureProperties.__version = texture.version; - - if ( texture.onUpdate ) texture.onUpdate( texture ); - - } - - // Render targets - - // Setup storage for target texture and bind it to correct framebuffer - function setupFrameBufferTexture( framebuffer, renderTarget, attachment, textureTarget ) { - - var glFormat = utils.convert( renderTarget.texture.format ); - var glType = utils.convert( renderTarget.texture.type ); - var glInternalFormat = getInternalFormat( glFormat, glType ); - state.texImage2D( textureTarget, 0, glInternalFormat, renderTarget.width, renderTarget.height, 0, glFormat, glType, null ); - _gl.bindFramebuffer( _gl.FRAMEBUFFER, framebuffer ); - _gl.framebufferTexture2D( _gl.FRAMEBUFFER, attachment, textureTarget, properties.get( renderTarget.texture ).__webglTexture, 0 ); - _gl.bindFramebuffer( _gl.FRAMEBUFFER, null ); - - } - - // Setup storage for internal depth/stencil buffers and bind to correct framebuffer - function setupRenderBufferStorage( renderbuffer, renderTarget, isMultisample ) { - - _gl.bindRenderbuffer( _gl.RENDERBUFFER, renderbuffer ); - - if ( renderTarget.depthBuffer && ! renderTarget.stencilBuffer ) { - - if ( isMultisample ) { - - var samples = getRenderTargetSamples( renderTarget ); - - _gl.renderbufferStorageMultisample( _gl.RENDERBUFFER, samples, _gl.DEPTH_COMPONENT16, renderTarget.width, renderTarget.height ); - - } else { - - _gl.renderbufferStorage( _gl.RENDERBUFFER, _gl.DEPTH_COMPONENT16, renderTarget.width, renderTarget.height ); - - } - - _gl.framebufferRenderbuffer( _gl.FRAMEBUFFER, _gl.DEPTH_ATTACHMENT, _gl.RENDERBUFFER, renderbuffer ); - - } else if ( renderTarget.depthBuffer && renderTarget.stencilBuffer ) { - - if ( isMultisample ) { - - var samples = getRenderTargetSamples( renderTarget ); - - _gl.renderbufferStorageMultisample( _gl.RENDERBUFFER, samples, _gl.DEPTH_STENCIL, renderTarget.width, renderTarget.height ); - - } else { - - _gl.renderbufferStorage( _gl.RENDERBUFFER, _gl.DEPTH_STENCIL, renderTarget.width, renderTarget.height ); - - } - - - _gl.framebufferRenderbuffer( _gl.FRAMEBUFFER, _gl.DEPTH_STENCIL_ATTACHMENT, _gl.RENDERBUFFER, renderbuffer ); - - } else { - - var glFormat = utils.convert( renderTarget.texture.format ); - var glType = utils.convert( renderTarget.texture.type ); - var glInternalFormat = getInternalFormat( glFormat, glType ); - - if ( isMultisample ) { - - var samples = getRenderTargetSamples( renderTarget ); - - _gl.renderbufferStorageMultisample( _gl.RENDERBUFFER, samples, glInternalFormat, renderTarget.width, renderTarget.height ); - - } else { - - _gl.renderbufferStorage( _gl.RENDERBUFFER, glInternalFormat, renderTarget.width, renderTarget.height ); - - } - - } - - _gl.bindRenderbuffer( _gl.RENDERBUFFER, null ); - - } - - // Setup resources for a Depth Texture for a FBO (needs an extension) - function setupDepthTexture( framebuffer, renderTarget ) { - - var isCube = ( renderTarget && renderTarget.isWebGLRenderTargetCube ); - if ( isCube ) throw new Error( 'Depth Texture with cube render targets is not supported' ); - - _gl.bindFramebuffer( _gl.FRAMEBUFFER, framebuffer ); - - if ( ! ( renderTarget.depthTexture && renderTarget.depthTexture.isDepthTexture ) ) { - - throw new Error( 'renderTarget.depthTexture must be an instance of THREE.DepthTexture' ); - - } - - // upload an empty depth texture with framebuffer size - if ( ! 
properties.get( renderTarget.depthTexture ).__webglTexture || - renderTarget.depthTexture.image.width !== renderTarget.width || - renderTarget.depthTexture.image.height !== renderTarget.height ) { - - renderTarget.depthTexture.image.width = renderTarget.width; - renderTarget.depthTexture.image.height = renderTarget.height; - renderTarget.depthTexture.needsUpdate = true; - - } - - setTexture2D( renderTarget.depthTexture, 0 ); - - var webglDepthTexture = properties.get( renderTarget.depthTexture ).__webglTexture; - - if ( renderTarget.depthTexture.format === DepthFormat ) { - - _gl.framebufferTexture2D( _gl.FRAMEBUFFER, _gl.DEPTH_ATTACHMENT, _gl.TEXTURE_2D, webglDepthTexture, 0 ); - - } else if ( renderTarget.depthTexture.format === DepthStencilFormat ) { - - _gl.framebufferTexture2D( _gl.FRAMEBUFFER, _gl.DEPTH_STENCIL_ATTACHMENT, _gl.TEXTURE_2D, webglDepthTexture, 0 ); - - } else { - - throw new Error( 'Unknown depthTexture format' ); - - } - - } - - // Setup GL resources for a non-texture depth buffer - function setupDepthRenderbuffer( renderTarget ) { - - var renderTargetProperties = properties.get( renderTarget ); - - var isCube = ( renderTarget.isWebGLRenderTargetCube === true ); - - if ( renderTarget.depthTexture ) { - - if ( isCube ) throw new Error( 'target.depthTexture not supported in Cube render targets' ); - - setupDepthTexture( renderTargetProperties.__webglFramebuffer, renderTarget ); - - } else { - - if ( isCube ) { - - renderTargetProperties.__webglDepthbuffer = []; - - for ( var i = 0; i < 6; i ++ ) { - - _gl.bindFramebuffer( _gl.FRAMEBUFFER, renderTargetProperties.__webglFramebuffer[ i ] ); - renderTargetProperties.__webglDepthbuffer[ i ] = _gl.createRenderbuffer(); - setupRenderBufferStorage( renderTargetProperties.__webglDepthbuffer[ i ], renderTarget ); - - } - - } else { - - _gl.bindFramebuffer( _gl.FRAMEBUFFER, renderTargetProperties.__webglFramebuffer ); - renderTargetProperties.__webglDepthbuffer = _gl.createRenderbuffer(); - setupRenderBufferStorage( renderTargetProperties.__webglDepthbuffer, renderTarget ); - - } - - } - - _gl.bindFramebuffer( _gl.FRAMEBUFFER, null ); - - } - - // Set up GL resources for the render target - function setupRenderTarget( renderTarget ) { - - var renderTargetProperties = properties.get( renderTarget ); - var textureProperties = properties.get( renderTarget.texture ); - - renderTarget.addEventListener( 'dispose', onRenderTargetDispose ); - - textureProperties.__webglTexture = _gl.createTexture(); - - info.memory.textures ++; - - var isCube = ( renderTarget.isWebGLRenderTargetCube === true ); - var isMultisample = ( renderTarget.isWebGLMultisampleRenderTarget === true ); - var supportsMips = isPowerOfTwo( renderTarget ) || capabilities.isWebGL2; - - // Setup framebuffer - - if ( isCube ) { - - renderTargetProperties.__webglFramebuffer = []; - - for ( var i = 0; i < 6; i ++ ) { - - renderTargetProperties.__webglFramebuffer[ i ] = _gl.createFramebuffer(); - - } - - } else { - - renderTargetProperties.__webglFramebuffer = _gl.createFramebuffer(); - - if ( isMultisample ) { - - if ( capabilities.isWebGL2 ) { - - renderTargetProperties.__webglMultisampledFramebuffer = _gl.createFramebuffer(); - renderTargetProperties.__webglColorRenderbuffer = _gl.createRenderbuffer(); - - _gl.bindRenderbuffer( _gl.RENDERBUFFER, renderTargetProperties.__webglColorRenderbuffer ); - var glFormat = utils.convert( renderTarget.texture.format ); - var glType = utils.convert( renderTarget.texture.type ); - var glInternalFormat = getInternalFormat( glFormat, glType 
); - var samples = getRenderTargetSamples( renderTarget ); - _gl.renderbufferStorageMultisample( _gl.RENDERBUFFER, samples, glInternalFormat, renderTarget.width, renderTarget.height ); - - _gl.bindFramebuffer( _gl.FRAMEBUFFER, renderTargetProperties.__webglMultisampledFramebuffer ); - _gl.framebufferRenderbuffer( _gl.FRAMEBUFFER, _gl.COLOR_ATTACHMENT0, _gl.RENDERBUFFER, renderTargetProperties.__webglColorRenderbuffer ); - _gl.bindRenderbuffer( _gl.RENDERBUFFER, null ); - - if ( renderTarget.depthBuffer ) { - - renderTargetProperties.__webglDepthRenderbuffer = _gl.createRenderbuffer(); - setupRenderBufferStorage( renderTargetProperties.__webglDepthRenderbuffer, renderTarget, true ); - - } - - _gl.bindFramebuffer( _gl.FRAMEBUFFER, null ); - - - } else { - - console.warn( 'THREE.WebGLRenderer: WebGLMultisampleRenderTarget can only be used with WebGL2.' ); - - } - - } - - } - - // Setup color buffer - - if ( isCube ) { - - state.bindTexture( _gl.TEXTURE_CUBE_MAP, textureProperties.__webglTexture ); - setTextureParameters( _gl.TEXTURE_CUBE_MAP, renderTarget.texture, supportsMips ); - - for ( var i = 0; i < 6; i ++ ) { - - setupFrameBufferTexture( renderTargetProperties.__webglFramebuffer[ i ], renderTarget, _gl.COLOR_ATTACHMENT0, _gl.TEXTURE_CUBE_MAP_POSITIVE_X + i ); - - } - - if ( textureNeedsGenerateMipmaps( renderTarget.texture, supportsMips ) ) { - - generateMipmap( _gl.TEXTURE_CUBE_MAP, renderTarget.texture, renderTarget.width, renderTarget.height ); - - } - - state.bindTexture( _gl.TEXTURE_CUBE_MAP, null ); - - } else { - - state.bindTexture( _gl.TEXTURE_2D, textureProperties.__webglTexture ); - setTextureParameters( _gl.TEXTURE_2D, renderTarget.texture, supportsMips ); - setupFrameBufferTexture( renderTargetProperties.__webglFramebuffer, renderTarget, _gl.COLOR_ATTACHMENT0, _gl.TEXTURE_2D ); - - if ( textureNeedsGenerateMipmaps( renderTarget.texture, supportsMips ) ) { - - generateMipmap( _gl.TEXTURE_2D, renderTarget.texture, renderTarget.width, renderTarget.height ); - - } - - state.bindTexture( _gl.TEXTURE_2D, null ); - - } - - // Setup depth and stencil buffers - - if ( renderTarget.depthBuffer ) { - - setupDepthRenderbuffer( renderTarget ); - - } - - } - - function updateRenderTargetMipmap( renderTarget ) { - - var texture = renderTarget.texture; - var supportsMips = isPowerOfTwo( renderTarget ) || capabilities.isWebGL2; - - if ( textureNeedsGenerateMipmaps( texture, supportsMips ) ) { - - var target = renderTarget.isWebGLRenderTargetCube ? 
_gl.TEXTURE_CUBE_MAP : _gl.TEXTURE_2D; - var webglTexture = properties.get( texture ).__webglTexture; - - state.bindTexture( target, webglTexture ); - generateMipmap( target, texture, renderTarget.width, renderTarget.height ); - state.bindTexture( target, null ); - - } - - } - - function updateMultisampleRenderTarget( renderTarget ) { - - if ( renderTarget.isWebGLMultisampleRenderTarget ) { - - if ( capabilities.isWebGL2 ) { - - var renderTargetProperties = properties.get( renderTarget ); - - _gl.bindFramebuffer( _gl.READ_FRAMEBUFFER, renderTargetProperties.__webglMultisampledFramebuffer ); - _gl.bindFramebuffer( _gl.DRAW_FRAMEBUFFER, renderTargetProperties.__webglFramebuffer ); - - var width = renderTarget.width; - var height = renderTarget.height; - var mask = _gl.COLOR_BUFFER_BIT; - - if ( renderTarget.depthBuffer ) mask |= _gl.DEPTH_BUFFER_BIT; - if ( renderTarget.stencilBuffer ) mask |= _gl.STENCIL_BUFFER_BIT; - - _gl.blitFramebuffer( 0, 0, width, height, 0, 0, width, height, mask, _gl.NEAREST ); - - } else { - - console.warn( 'THREE.WebGLRenderer: WebGLMultisampleRenderTarget can only be used with WebGL2.' ); - - } - - } - - } - - function getRenderTargetSamples( renderTarget ) { - - return ( capabilities.isWebGL2 && renderTarget.isWebGLMultisampleRenderTarget ) ? - Math.min( capabilities.maxSamples, renderTarget.samples ) : 0; - - } - - function updateVideoTexture( texture ) { - - var id = texture.id; - var frame = info.render.frame; - - // Check the last frame we updated the VideoTexture - - if ( _videoTextures[ id ] !== frame ) { - - _videoTextures[ id ] = frame; - texture.update(); - - } - - } - - // backwards compatibility - - var warnedTexture2D = false; - var warnedTextureCube = false; - - function safeSetTexture2D( texture, slot ) { - - if ( texture && texture.isWebGLRenderTarget ) { - - if ( warnedTexture2D === false ) { - - console.warn( "THREE.WebGLTextures.safeSetTexture2D: don't use render targets as textures. Use their .texture property instead." ); - warnedTexture2D = true; - - } - - texture = texture.texture; - - } - - setTexture2D( texture, slot ); - - } - - function safeSetTextureCube( texture, slot ) { - - if ( texture && texture.isWebGLRenderTargetCube ) { - - if ( warnedTextureCube === false ) { - - console.warn( "THREE.WebGLTextures.safeSetTextureCube: don't use cube render targets as textures. Use their .texture property instead." 
); - warnedTextureCube = true; - - } - - texture = texture.texture; - - } - - // currently relying on the fact that WebGLRenderTargetCube.texture is a Texture and NOT a CubeTexture - // TODO: unify these code paths - if ( ( texture && texture.isCubeTexture ) || - ( Array.isArray( texture.image ) && texture.image.length === 6 ) ) { - - // CompressedTexture can have Array in image :/ - - // this function alone should take care of cube textures - setTextureCube( texture, slot ); - - } else { - - // assumed: texture property of THREE.WebGLRenderTargetCube - setTextureCubeDynamic( texture, slot ); - - } - - } - - // - - this.allocateTextureUnit = allocateTextureUnit; - this.resetTextureUnits = resetTextureUnits; - - this.setTexture2D = setTexture2D; - this.setTexture2DArray = setTexture2DArray; - this.setTexture3D = setTexture3D; - this.setTextureCube = setTextureCube; - this.setTextureCubeDynamic = setTextureCubeDynamic; - this.setupRenderTarget = setupRenderTarget; - this.updateRenderTargetMipmap = updateRenderTargetMipmap; - this.updateMultisampleRenderTarget = updateMultisampleRenderTarget; - - this.safeSetTexture2D = safeSetTexture2D; - this.safeSetTextureCube = safeSetTextureCube; - -} - -export { WebGLTextures }; diff --git a/spaces/bastiendechamps/geoguessr-bot/geoguessr_bot/retriever/random_embedder.py b/spaces/bastiendechamps/geoguessr-bot/geoguessr_bot/retriever/random_embedder.py deleted file mode 100644 index 13670c1fe8b04e607c4e8be3af4afa5d19ca8ee2..0000000000000000000000000000000000000000 --- a/spaces/bastiendechamps/geoguessr-bot/geoguessr_bot/retriever/random_embedder.py +++ /dev/null @@ -1,16 +0,0 @@ -from dataclasses import dataclass - -import numpy as np -from PIL import Image - -from geoguessr_bot.retriever import AbstractImageEmbedder - - -@dataclass -class RandomEmbedder(AbstractImageEmbedder): - n_dim: int = 8 - - def embed(self, image: Image) -> np.ndarray: - """Embed an image - """ - return np.random.rand(self.n_dim) diff --git a/spaces/betterme/Nice/pages/echarts.py b/spaces/betterme/Nice/pages/echarts.py deleted file mode 100644 index a68400c76d0d08ac1ae21171e1fa353f5e1e17d1..0000000000000000000000000000000000000000 --- a/spaces/betterme/Nice/pages/echarts.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Project : AI. 
@by PyCharm -# @File : echarts -# @Time : 2023/4/7 11:02 -# @Author : betterme -# @WeChat : meutils -# @Software : PyCharm -# @Description : - -from _pyecharts import options as opts -from _pyecharts.charts import Bar, WordCloud -from streamlit_echarts import st_pyecharts -import streamlit as st - -# st.markdown(open('xx.html').read(), unsafe_allow_html=True) - -tab1, tab2, tab3 = st.tabs(["Bar", "WordCloud", "Owl"]) - -with tab1: - b = ( - Bar() - .add_xaxis(["Microsoft", "Amazon", "IBM", "Oracle", "Google", "Alibaba"]) - .add_yaxis( - "2017-2018 Revenue in (billion $)", [21.2, 20.4, 10.3, 6.08, 4, 2.2] - ) - .set_global_opts( - title_opts=opts.TitleOpts( - title="Top cloud providers 2018", subtitle="2017-2018 Revenue" - ), - toolbox_opts=opts.ToolboxOpts(), - ) - ) - st_pyecharts(b) - -with tab2: - pairs = [('中国', 33), - ('苹果', 24), - ('奚梦瑶', 20), - ('美国', 16), - ('特朗普', 16), - ('何猷君', 15), - ('戛纳', 13), - ('红毯', 12), - ('iPhone', 12), - ('车队', 9), - ('车祸', 9), - ('优衣', 9), - ('信息', 9), - ('李亚鹏', 9), - ('恋情', 9), - ('任素', 9), - ('男孩', 9), - ('亚洲', 8), - ('孩子', 8), - ('大学生', 8)] - shapes = ['circle', 'cardioid', 'diamond', 'triangle-forward', 'triangle', 'pentagon', 'star'] - - wc = ( - WordCloud() - .add("WordCloud", data_pair=pairs, shape=shapes[0], width='900px', height='500px') - - .set_global_opts( - title_opts=opts.TitleOpts( - title="WordCloud", subtitle="WordCloud" - ), - toolbox_opts=opts.ToolboxOpts(), ) - ) - - st_pyecharts(wc) diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/deep/models/inceptionv4.py b/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/deep/models/inceptionv4.py deleted file mode 100644 index b14916f140712298866c943ebdb4ebad67d72fc4..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/deep/models/inceptionv4.py +++ /dev/null @@ -1,381 +0,0 @@ -from __future__ import division, absolute_import -import torch -import torch.nn as nn -import torch.utils.model_zoo as model_zoo - -__all__ = ['inceptionv4'] -""" -Code imported from https://github.com/Cadene/pretrained-models.pytorch -""" - -pretrained_settings = { - 'inceptionv4': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', - 'input_space': 'RGB', - 'input_size': [3, 299, 299], - 'input_range': [0, 1], - 'mean': [0.5, 0.5, 0.5], - 'std': [0.5, 0.5, 0.5], - 'num_classes': 1000 - }, - 'imagenet+background': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', - 'input_space': 'RGB', - 'input_size': [3, 299, 299], - 'input_range': [0, 1], - 'mean': [0.5, 0.5, 0.5], - 'std': [0.5, 0.5, 0.5], - 'num_classes': 1001 - } - } -} - - -class BasicConv2d(nn.Module): - - def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): - super(BasicConv2d, self).__init__() - self.conv = nn.Conv2d( - in_planes, - out_planes, - kernel_size=kernel_size, - stride=stride, - padding=padding, - bias=False - ) # verify bias false - self.bn = nn.BatchNorm2d( - out_planes, - eps=0.001, # value found in tensorflow - momentum=0.1, # default pytorch value - affine=True - ) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return x - - -class Mixed_3a(nn.Module): - - def __init__(self): - super(Mixed_3a, self).__init__() - self.maxpool = nn.MaxPool2d(3, stride=2) - self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) - - def forward(self, x): - x0 = self.maxpool(x) - x1 = self.conv(x) - out = 
torch.cat((x0, x1), 1) - return out - - -class Mixed_4a(nn.Module): - - def __init__(self): - super(Mixed_4a, self).__init__() - - self.branch0 = nn.Sequential( - BasicConv2d(160, 64, kernel_size=1, stride=1), - BasicConv2d(64, 96, kernel_size=3, stride=1) - ) - - self.branch1 = nn.Sequential( - BasicConv2d(160, 64, kernel_size=1, stride=1), - BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), - BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), - BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - out = torch.cat((x0, x1), 1) - return out - - -class Mixed_5a(nn.Module): - - def __init__(self): - super(Mixed_5a, self).__init__() - self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) - self.maxpool = nn.MaxPool2d(3, stride=2) - - def forward(self, x): - x0 = self.conv(x) - x1 = self.maxpool(x) - out = torch.cat((x0, x1), 1) - return out - - -class Inception_A(nn.Module): - - def __init__(self): - super(Inception_A, self).__init__() - self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) - - self.branch1 = nn.Sequential( - BasicConv2d(384, 64, kernel_size=1, stride=1), - BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) - ) - - self.branch2 = nn.Sequential( - BasicConv2d(384, 64, kernel_size=1, stride=1), - BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), - BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) - ) - - self.branch3 = nn.Sequential( - nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), - BasicConv2d(384, 96, kernel_size=1, stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - x3 = self.branch3(x) - out = torch.cat((x0, x1, x2, x3), 1) - return out - - -class Reduction_A(nn.Module): - - def __init__(self): - super(Reduction_A, self).__init__() - self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) - - self.branch1 = nn.Sequential( - BasicConv2d(384, 192, kernel_size=1, stride=1), - BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), - BasicConv2d(224, 256, kernel_size=3, stride=2) - ) - - self.branch2 = nn.MaxPool2d(3, stride=2) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - out = torch.cat((x0, x1, x2), 1) - return out - - -class Inception_B(nn.Module): - - def __init__(self): - super(Inception_B, self).__init__() - self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) - - self.branch1 = nn.Sequential( - BasicConv2d(1024, 192, kernel_size=1, stride=1), - BasicConv2d( - 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) - ), - BasicConv2d( - 224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0) - ) - ) - - self.branch2 = nn.Sequential( - BasicConv2d(1024, 192, kernel_size=1, stride=1), - BasicConv2d( - 192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0) - ), - BasicConv2d( - 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) - ), - BasicConv2d( - 224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0) - ), - BasicConv2d( - 224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) - ) - ) - - self.branch3 = nn.Sequential( - nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), - BasicConv2d(1024, 128, kernel_size=1, stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - x3 = self.branch3(x) - out = torch.cat((x0, x1, x2, x3), 1) - return out - - -class Reduction_B(nn.Module): - - def __init__(self): - super(Reduction_B, 
self).__init__() - - self.branch0 = nn.Sequential( - BasicConv2d(1024, 192, kernel_size=1, stride=1), - BasicConv2d(192, 192, kernel_size=3, stride=2) - ) - - self.branch1 = nn.Sequential( - BasicConv2d(1024, 256, kernel_size=1, stride=1), - BasicConv2d( - 256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) - ), - BasicConv2d( - 256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0) - ), BasicConv2d(320, 320, kernel_size=3, stride=2) - ) - - self.branch2 = nn.MaxPool2d(3, stride=2) - - def forward(self, x): - x0 = self.branch0(x) - x1 = self.branch1(x) - x2 = self.branch2(x) - out = torch.cat((x0, x1, x2), 1) - return out - - -class Inception_C(nn.Module): - - def __init__(self): - super(Inception_C, self).__init__() - - self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) - - self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) - self.branch1_1a = BasicConv2d( - 384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) - ) - self.branch1_1b = BasicConv2d( - 384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) - ) - - self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) - self.branch2_1 = BasicConv2d( - 384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0) - ) - self.branch2_2 = BasicConv2d( - 448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1) - ) - self.branch2_3a = BasicConv2d( - 512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) - ) - self.branch2_3b = BasicConv2d( - 512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) - ) - - self.branch3 = nn.Sequential( - nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), - BasicConv2d(1536, 256, kernel_size=1, stride=1) - ) - - def forward(self, x): - x0 = self.branch0(x) - - x1_0 = self.branch1_0(x) - x1_1a = self.branch1_1a(x1_0) - x1_1b = self.branch1_1b(x1_0) - x1 = torch.cat((x1_1a, x1_1b), 1) - - x2_0 = self.branch2_0(x) - x2_1 = self.branch2_1(x2_0) - x2_2 = self.branch2_2(x2_1) - x2_3a = self.branch2_3a(x2_2) - x2_3b = self.branch2_3b(x2_2) - x2 = torch.cat((x2_3a, x2_3b), 1) - - x3 = self.branch3(x) - - out = torch.cat((x0, x1, x2, x3), 1) - return out - - -class InceptionV4(nn.Module): - """Inception-v4. - - Reference: - Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual - Connections on Learning. AAAI 2017. - - Public keys: - - ``inceptionv4``: InceptionV4. - """ - - def __init__(self, num_classes, loss, **kwargs): - super(InceptionV4, self).__init__() - self.loss = loss - - self.features = nn.Sequential( - BasicConv2d(3, 32, kernel_size=3, stride=2), - BasicConv2d(32, 32, kernel_size=3, stride=1), - BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), - Mixed_3a(), - Mixed_4a(), - Mixed_5a(), - Inception_A(), - Inception_A(), - Inception_A(), - Inception_A(), - Reduction_A(), # Mixed_6a - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Inception_B(), - Reduction_B(), # Mixed_7a - Inception_C(), - Inception_C(), - Inception_C() - ) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.classifier = nn.Linear(1536, num_classes) - - def forward(self, x): - f = self.features(x) - v = self.global_avgpool(f) - v = v.view(v.size(0), -1) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError('Unsupported loss: {}'.format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. 
- - Layers that don't match with pretrained layers in name or size are kept unchanged. - """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs): - model = InceptionV4(num_classes, loss, **kwargs) - if pretrained: - model_url = pretrained_settings['inceptionv4']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model diff --git a/spaces/bigcode/bigcode-editor/start.py b/spaces/bigcode/bigcode-editor/start.py deleted file mode 100644 index 8d5428c553be35fc110177a74e9b84560416ceab..0000000000000000000000000000000000000000 --- a/spaces/bigcode/bigcode-editor/start.py +++ /dev/null @@ -1,3 +0,0 @@ -import subprocess - -subprocess.run("uvicorn app:app --timeout-keep-alive 300 --host 0.0.0.0 --port 7860", shell=True) diff --git a/spaces/bigjoker/stable-diffusion-webui/test/basic_features/utils_test.py b/spaces/bigjoker/stable-diffusion-webui/test/basic_features/utils_test.py deleted file mode 100644 index 0bfc28a0d30c070c292ff8154e9b93a74abecb85..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/test/basic_features/utils_test.py +++ /dev/null @@ -1,62 +0,0 @@ -import unittest -import requests - -class UtilsTests(unittest.TestCase): - def setUp(self): - self.url_options = "http://localhost:7860/sdapi/v1/options" - self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" - self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" - self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" - self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" - self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" - self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" - self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models" - self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" - self.url_embeddings = "http://localhost:7860/sdapi/v1/embeddings" - - def test_options_get(self): - self.assertEqual(requests.get(self.url_options).status_code, 200) - - def test_options_write(self): - response = requests.get(self.url_options) - self.assertEqual(response.status_code, 200) - - pre_value = response.json()["send_seed"] - - self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200) - - response = requests.get(self.url_options) - self.assertEqual(response.status_code, 200) - self.assertEqual(response.json()["send_seed"], not pre_value) - - requests.post(self.url_options, json={"send_seed": pre_value}) - - def test_cmd_flags(self): - self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200) - - def test_samplers(self): - self.assertEqual(requests.get(self.url_samplers).status_code, 200) - - def test_upscalers(self): - self.assertEqual(requests.get(self.url_upscalers).status_code, 200) - - def test_sd_models(self): - self.assertEqual(requests.get(self.url_sd_models).status_code, 200) - - def test_hypernetworks(self): - self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200) - - def test_face_restorers(self): - self.assertEqual(requests.get(self.url_face_restorers).status_code, 200) - - def test_realesrgan_models(self): - self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200) - - 
def test_prompt_styles(self): - self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200) - - def test_embeddings(self): - self.assertEqual(requests.get(self.url_embeddings).status_code, 200) - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/bigslime/stablediffusion-infinity/perlin2d.py b/spaces/bigslime/stablediffusion-infinity/perlin2d.py deleted file mode 100644 index 917c2c6511f5f1a75a284be9a9fef3248d82f2f9..0000000000000000000000000000000000000000 --- a/spaces/bigslime/stablediffusion-infinity/perlin2d.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np - -########## -# https://stackoverflow.com/questions/42147776/producing-2d-perlin-noise-with-numpy/42154921#42154921 -def perlin(x, y, seed=0): - # permutation table - np.random.seed(seed) - p = np.arange(256, dtype=int) - np.random.shuffle(p) - p = np.stack([p, p]).flatten() - # coordinates of the top-left - xi, yi = x.astype(int), y.astype(int) - # internal coordinates - xf, yf = x - xi, y - yi - # fade factors - u, v = fade(xf), fade(yf) - # noise components - n00 = gradient(p[p[xi] + yi], xf, yf) - n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1) - n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1) - n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf) - # combine noises - x1 = lerp(n00, n10, u) - x2 = lerp(n01, n11, u) # FIX1: I was using n10 instead of n01 - return lerp(x1, x2, v) # FIX2: I also had to reverse x1 and x2 here - - -def lerp(a, b, x): - "linear interpolation" - return a + x * (b - a) - - -def fade(t): - "6t^5 - 15t^4 + 10t^3" - return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3 - - -def gradient(h, x, y): - "grad converts h to the right gradient vector and return the dot product with (x,y)" - vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]]) - g = vectors[h % 4] - return g[:, :, 0] * x + g[:, :, 1] * y - - -########## \ No newline at end of file diff --git a/spaces/bincooo/auto-ai/README.md b/spaces/bincooo/auto-ai/README.md deleted file mode 100644 index 9affbabd2d707f73afcbaa2cff3e77d1c7a6b9a1..0000000000000000000000000000000000000000 --- a/spaces/bincooo/auto-ai/README.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Auto Ai -emoji: 🔥 -colorFrom: gray -colorTo: gray -sdk: docker -pinned: false -license: mit -app_port: 8444 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - - -FORK本项目后,请自行添加`.env.example`下的环境变量: -```env -# 自行搭建注册接口,或者直接使用claudeai.ai的:https://email.claudeai.ai/claude_api -REV= -# BingAI cookie值(_U=部分) -BING_TOKEN= -# 转发地址 -BING_BASE_URL= -``` -pic.png diff --git a/spaces/bioriAsaeru/text-to-voice/Crysis 1 Crack Free Download !FREE!.md b/spaces/bioriAsaeru/text-to-voice/Crysis 1 Crack Free Download !FREE!.md deleted file mode 100644 index 8a6e8aef5d5f531e2bc11199ea265e07f39fb043..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Crysis 1 Crack Free Download !FREE!.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

    Why use JAWS? JAWS (Job Access With Speech) is a speech-enabled screen reader designed for people who are blind or have low vision. It can read large amounts of on-screen text as well as program help text.

    -

    Crysis 1 Crack Free Download


    DOWNLOADhttps://urloso.com/2uyQ2c



    -

    Crysis 1 Crack Free Download PC Game In Direct Link. Crysis 1 is an action game released on 01-Jan-2010 by Crytek and is part of the Crysis series. This is an offline installer and standalone setup for PC, with a download size of 26.6 MB, and it runs on Windows 7 and Windows XP.

    -

    -Replaced the stealth gameplay mechanic with #Ghost - A possibility of surviving for a long period of time in Crysis 2 during the Pre-Alpha stage was added. This mechanic has since been removed but the very few players that managed to survive for hours and days on the International map will be rewarded with lots of credits (1000 when you first get there) - Removed the built in intro/outro video. Credits should go to the numerous other video editors out there that made Crysis so spectacular to play, and to Backflip for the above video, which actually helped to create a new template for all upcoming intro/outro videos for future Crysis games.

    -

    Crysis 1 has long been regarded as one of the greatest games of all time. The sequel was an absolute atrocity: many of the aspects that made the first game great are gone, and its gameplay, interface, and player feedback systems are all terrible. Crysis 1 was groundbreaking in terms of gameplay and graphics, and it remains one of the best games ever made.

    -

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Download Gamesalad Pro Full Version ((INSTALL)).md b/spaces/bioriAsaeru/text-to-voice/Download Gamesalad Pro Full Version ((INSTALL)).md deleted file mode 100644 index 87711697b2baccb90ad3aaa7d15ea0af607de6c0..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Download Gamesalad Pro Full Version ((INSTALL)).md +++ /dev/null @@ -1,6 +0,0 @@ -

    download gamesalad pro full version


    Downloadhttps://urloso.com/2uyRXh



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/bla/tranny/App/Transcription/Schemas.py b/spaces/bla/tranny/App/Transcription/Schemas.py deleted file mode 100644 index a6a4020248fbda630275971bfb3bdf5fe994afe7..0000000000000000000000000000000000000000 --- a/spaces/bla/tranny/App/Transcription/Schemas.py +++ /dev/null @@ -1,26 +0,0 @@ -from pydantic import BaseModel, Field -from typing import Optional -from datetime import datetime - - -class TranscriptionMetadata(BaseModel): - duration: int = 0 - language: str = "-" - percentage: str = "-" - content: list = [] - status: str = "PENDING" - - -class TranscriptionResult(TranscriptionMetadata): - created_at: datetime = Field(default_factory=datetime.utcnow) - task_id: str - - -class BaseTranscription(TranscriptionResult): - file_name: str = "-" - tl_file_id: Optional[str] = "-" - youtubeLink: Optional[str] = "-" - - -class UserDetails(BaseModel): - userId: str diff --git a/spaces/boddles2/pyannote-speaker-diarization-2/app.py b/spaces/boddles2/pyannote-speaker-diarization-2/app.py deleted file mode 100644 index a63c1ec9644c2382681e0b963467f9b33e4e0ff9..0000000000000000000000000000000000000000 --- a/spaces/boddles2/pyannote-speaker-diarization-2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/pyannote/speaker-diarization").launch() \ No newline at end of file diff --git a/spaces/bofenghuang/whisper-demo-german/app.py b/spaces/bofenghuang/whisper-demo-german/app.py deleted file mode 100644 index 23bb21027c118e1d58473b681501a14dc962cfe2..0000000000000000000000000000000000000000 --- a/spaces/bofenghuang/whisper-demo-german/app.py +++ /dev/null @@ -1 +0,0 @@ -run_demo_ct2.py \ No newline at end of file diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/utils/best_state.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/utils/best_state.py deleted file mode 100644 index f5ad551432ad5cb0f83278b5d2100f9aa287958b..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/utils/best_state.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -import logging -import typing as tp - -import flashy -import torch - -from ..optim import ModuleDictEMA -from .utils import copy_state - - -logger = logging.getLogger(__name__) - - -class BestStateDictManager(flashy.state.StateDictSource): - """BestStateDictManager maintains a copy of best state_dict() for registered sources. - - BestStateDictManager has two main attributes: - states (dict): State dict of the registered StateDictSource. - param_ids (dict): Dict of parameter ids for registered states from ModuleDictEMA and other sources. - - When registering new sources, the BestStateDictManager will ensure two conflicting sources between - ModuleDictEMA and original modules are not both registered as it would otherwise create ambiguity about - what to consider for best state. - - Args: - device (torch.device or str): Device on which we keep the copy. - dtype (torch.dtype): Data type for the state parameters. 
- """ - def __init__(self, device: tp.Union[torch.device, str] = 'cpu', - dtype: tp.Optional[torch.dtype] = None): - self.device = device - self.states: dict = {} - self.param_ids: dict = defaultdict(dict) - self.dtype = dtype - - def _get_parameter_ids(self, state_dict): - return {id(p): name for name, p in state_dict.items() if isinstance(p, torch.Tensor)} - - def _validate_no_parameter_ids_overlap(self, name: str, param_ids: dict): - for registered_name, registered_param_ids in self.param_ids.items(): - if registered_name != name: - overlap = set.intersection(registered_param_ids.keys(), param_ids.keys()) - assert len(overlap) == 0, f"Found {len(overlap)} / {len(param_ids.keys())} overlapping parameters" - f" in {name} and already registered {registered_name}: {' '.join(overlap)}" - - def update(self, name: str, source: flashy.state.StateDictSource): - if name not in self.states: - raise ValueError(f"{name} missing from registered states.") - self.states[name] = copy_state(source.state_dict(), device=self.device, dtype=self.dtype) - - def register(self, name: str, source: flashy.state.StateDictSource): - if name in self.states: - raise ValueError(f"{name} already present in states.") - # Registering parameter ids for EMA and non-EMA states allows us to check that - # there is no overlap that would create ambiguity about how to handle the best state - param_ids = self._get_parameter_ids(source.state_dict()) - if isinstance(source, ModuleDictEMA): - logger.debug(f"Registering to best state: ModuleDictEMA '{name}' with {len(param_ids)} params") - self._validate_no_parameter_ids_overlap(name, param_ids) - self.param_ids[name] = param_ids - else: - logger.debug(f"Registering to best state: StateDictSource '{name}' with {len(param_ids)} params") - self._validate_no_parameter_ids_overlap('base', param_ids) - self.param_ids['base'].update(param_ids) - # Register state - self.states[name] = copy_state(source.state_dict(), device=self.device, dtype=self.dtype) - - def state_dict(self) -> flashy.state.StateDict: - return self.states - - def load_state_dict(self, state: flashy.state.StateDict): - for name, sub_state in state.items(): - for k, v in sub_state.items(): - self.states[name][k].copy_(v) diff --git a/spaces/camenduru/10/README.md b/spaces/camenduru/10/README.md deleted file mode 100644 index e406a4c6dbf0080fdce51059fe1e1fc3088c6be2..0000000000000000000000000000000000000000 --- a/spaces/camenduru/10/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: '' -emoji: 👾 -colorFrom: purple -colorTo: purple -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cccc-c/web-ui-pub/index.html b/spaces/cccc-c/web-ui-pub/index.html deleted file mode 100644 index 6fb24f3e9bc4fe4349f8725ec013be091d01bea3..0000000000000000000000000000000000000000 --- a/spaces/cccc-c/web-ui-pub/index.html +++ /dev/null @@ -1 +0,0 @@ -Gradiobot UI \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/pplm/pplm_classification_head.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/pplm/pplm_classification_head.py deleted file mode 100644 index e26521fe39101f297e24d93e0a73028c803b390b..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/pplm/pplm_classification_head.py +++ /dev/null @@ -1,19 +0,0 @@ -from torch import nn - - -class ClassificationHead(nn.Module): - 
"""Classification Head for transformer encoders""" - - def __init__(self, class_size, embed_size): - super().__init__() - self.class_size = class_size - self.embed_size = embed_size - # self.mlp1 = nn.Linear(embed_size, embed_size) - # self.mlp2 = (nn.Linear(embed_size, class_size)) - self.mlp = nn.Linear(embed_size, class_size) - - def forward(self, hidden_state): - # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) - # hidden_state = self.mlp2(hidden_state) - logits = self.mlp(hidden_state) - return logits diff --git a/spaces/chendl/compositional_test/transformers/examples/tensorflow/translation/README.md b/spaces/chendl/compositional_test/transformers/examples/tensorflow/translation/README.md deleted file mode 100644 index df5ee9c1ae36ba4de7817633517d989990b4ed06..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/tensorflow/translation/README.md +++ /dev/null @@ -1,69 +0,0 @@ - - -# Translation example - -This script shows an example of training a *translation* model with the 🤗 Transformers library. -For straightforward use-cases you may be able to use these scripts without modification, although we have also -included comments in the code to indicate areas that you may need to adapt to your own projects. - -### Multi-GPU and TPU usage - -By default, these scripts use a `MirroredStrategy` and will use multiple GPUs effectively if they are available. TPUs -can also be used by passing the name of the TPU resource with the `--tpu` argument. - -### Example commands and caveats - -MBart and some T5 models require special handling. - -T5 models `t5-small`, `t5-base`, `t5-large`, `t5-3b` and `t5-11b` must use an additional argument: `--source_prefix "translate {source_lang} to {target_lang}"`. For example: - -```bash -python run_translation.py \ - --model_name_or_path t5-small \ - --do_train \ - --do_eval \ - --source_lang en \ - --target_lang ro \ - --source_prefix "translate English to Romanian: " \ - --dataset_name wmt16 \ - --dataset_config_name ro-en \ - --output_dir /tmp/tst-translation \ - --per_device_train_batch_size=16 \ - --per_device_eval_batch_size=16 \ - --overwrite_output_dir -``` - -If you get a terrible BLEU score, make sure that you didn't forget to use the `--source_prefix` argument. - -For the aforementioned group of T5 models it's important to remember that if you switch to a different language pair, make sure to adjust the source and target values in all 3 language-specific command line argument: `--source_lang`, `--target_lang` and `--source_prefix`. - -MBart models require a different format for `--source_lang` and `--target_lang` values, e.g. instead of `en` it expects `en_XX`, for `ro` it expects `ro_RO`. The full MBart specification for language codes can be found [here](https://huggingface.co/facebook/mbart-large-cc25). 
For example: - -```bash -python run_translation.py \ - --model_name_or_path facebook/mbart-large-en-ro \ - --do_train \ - --do_eval \ - --dataset_name wmt16 \ - --dataset_config_name ro-en \ - --source_lang en_XX \ - --target_lang ro_RO \ - --output_dir /tmp/tst-translation \ - --per_device_train_batch_size=16 \ - --per_device_eval_batch_size=16 \ - --overwrite_output_dir - ``` diff --git a/spaces/chinhon/translation_eng2ch/README.md b/spaces/chinhon/translation_eng2ch/README.md deleted file mode 100644 index 783148041c150fec17fae5737c39d9738b9f5ffc..0000000000000000000000000000000000000000 --- a/spaces/chinhon/translation_eng2ch/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Translation_eng2ch -emoji: 👁 -colorFrom: red -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/chrisjay/afro-speech/inference.py b/spaces/chrisjay/afro-speech/inference.py deleted file mode 100644 index 66d8781e4788b3e4ea881ce60100120a43bf08ef..0000000000000000000000000000000000000000 --- a/spaces/chrisjay/afro-speech/inference.py +++ /dev/null @@ -1,119 +0,0 @@ -import torch -import torchaudio -from torch import nn -from transformers import AutoFeatureExtractor,AutoModelForAudioClassification,pipeline - -#Preprocessing the data -feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") -max_duration = 2.0 # seconds - - -if torch.cuda.is_available(): - device = "cuda" -else: - device = "cpu" - -softmax = nn.Softmax() - - -label2id, id2label = dict(), dict() -labels = ['0','1','2','3','4','5','6','7','8','9'] -num_labels = 10 - -for i, label in enumerate(labels): - label2id[label] = str(i) - id2label[str(i)] = label - - -def get_pipeline(model_name): - if model_name.split('-')[-1].strip()!='ibo': - return None - return pipeline(task="audio-classification", model=model_name) - - -def load_model(model_checkpoint): - #if model_checkpoint.split('-')[-1].strip()!='ibo': #This is for DEBUGGING - # return None, None - - # construct model and assign it to device - model = AutoModelForAudioClassification.from_pretrained( - model_checkpoint, - num_labels=num_labels, - label2id=label2id, - id2label=id2label, - ).to(device) - - return model - -language_dict = { - "Igbo":'ibo', - "Oshiwambo":'kua', - "Yoruba":'yor', - "Oromo":'gax', - "Shona":'sna', - "Rundi":'run', - "Choose language":'none', - "MULTILINGUAL":'all' - } - -AUDIO_CLASSIFICATION_MODELS= {'ibo':load_model('chrisjay/afrospeech-wav2vec-ibo'), - 'kua':load_model('chrisjay/afrospeech-wav2vec-kua'), - 'sna':load_model('chrisjay/afrospeech-wav2vec-sna'), - 'yor':load_model('chrisjay/afrospeech-wav2vec-yor'), - 'gax':load_model('chrisjay/afrospeech-wav2vec-gax'), - 
'run':load_model('chrisjay/afrospeech-wav2vec-run'), - 'all':load_model('chrisjay/afrospeech-wav2vec-all-6') } - - -def cut_if_necessary(signal,num_samples): - if signal.shape[1] > num_samples: - signal = signal[:, :num_samples] - return signal - -def right_pad_if_necessary(signal,num_samples): - length_signal = signal.shape[1] - if length_signal < num_samples: - num_missing_samples = num_samples - length_signal - last_dim_padding = (0, num_missing_samples) - signal = torch.nn.functional.pad(signal, last_dim_padding) - return signal - -def resample_if_necessary(signal, sr,target_sample_rate,device): - if sr != target_sample_rate: - resampler = torchaudio.transforms.Resample(sr, target_sample_rate).to(device) - signal = resampler(signal) - return signal - -def mix_down_if_necessary(signal): - if signal.shape[0] > 1: - signal = torch.mean(signal, dim=0, keepdim=True) - return signal - - - -def preprocess_audio(waveform,sample_rate,feature_extractor): - - waveform = resample_if_necessary(waveform, sample_rate,16000,device) - waveform = mix_down_if_necessary(waveform) - waveform = cut_if_necessary(waveform,16000) - waveform = right_pad_if_necessary(waveform,16000) - transformed = feature_extractor(waveform,sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True) - return transformed - - - -def make_inference(drop_down,audio): - waveform, sample_rate = torchaudio.load(audio) - preprocessed_audio = preprocess_audio(waveform,sample_rate,feature_extractor) - language_code_chosen = language_dict[drop_down] - model = AUDIO_CLASSIFICATION_MODELS[language_code_chosen] - model.eval() - torch_preprocessed_audio = torch.from_numpy(preprocessed_audio.input_values[0]) - # make prediction - prediction = softmax(model(torch_preprocessed_audio).logits) - - sorted_prediction = torch.sort(prediction,descending=True) - confidences={} - for s,v in zip(sorted_prediction.indices.detach().numpy().tolist()[0],sorted_prediction.values.detach().numpy().tolist()[0]): - confidences.update({s:v}) - return confidences diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Button-9230b6bf.css b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Button-9230b6bf.css deleted file mode 100644 index 0db00b345960eccb6153c841257c085b4b12e38d..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Button-9230b6bf.css +++ /dev/null @@ -1 +0,0 @@ -.block.svelte-kz0ejz{position:relative;margin:0;box-shadow:var(--block-shadow);border-width:var(--block-border-width);border-color:var(--block-border-color);border-radius:var(--block-radius);background:var(--block-background-fill);width:100%;line-height:var(--line-sm)}.block.border_focus.svelte-kz0ejz{border-color:var(--color-accent)}.padded.svelte-kz0ejz{padding:var(--block-padding)}.hidden.svelte-kz0ejz{display:none}.hide-container.svelte-kz0ejz{margin:0;box-shadow:none;border-width:0;background:transparent;padding:0;overflow:visible}div.svelte-e8n7p6{margin-bottom:var(--spacing-lg);color:var(--block-info-text-color);font-weight:var(--block-info-text-weight);font-size:var(--block-info-text-size);line-height:var(--line-sm)}span.has-info.svelte-1gfkn6j{margin-bottom:var(--spacing-xs)}span.svelte-1gfkn6j:not(.has-info){margin-bottom:var(--spacing-lg)}span.svelte-1gfkn6j{display:inline-block;position:relative;z-index:var(--layer-4);border:solid 
var(--block-title-border-width) var(--block-title-border-color);border-radius:var(--block-title-radius);background:var(--block-title-background-fill);padding:var(--block-title-padding);color:var(--block-title-text-color);font-weight:var(--block-title-text-weight);font-size:var(--block-title-text-size);line-height:var(--line-sm)}.hide.svelte-1gfkn6j{margin:0;height:0}div.svelte-5hfany{display:inline-flex;align-items:center;z-index:var(--layer-2);box-shadow:var(--block-label-shadow);border:var(--block-label-border-width) solid var(--border-color-primary);border-top:none;border-left:none;border-radius:var(--block-label-radius);background:var(--block-label-background-fill);padding:var(--block-label-padding);pointer-events:none;color:var(--block-label-text-color);font-weight:var(--block-label-text-weight);font-size:var(--block-label-text-size);line-height:var(--line-sm)}div.float.svelte-5hfany{position:absolute;top:var(--block-label-margin);left:var(--block-label-margin)}div.svelte-5hfany:not(.float){position:static;margin-top:var(--block-label-margin);margin-left:var(--block-label-margin)}.hide.svelte-5hfany{height:0}span.svelte-5hfany{opacity:.8;margin-right:var(--size-2);width:calc(var(--block-label-text-size) - 1px);height:calc(var(--block-label-text-size) - 1px)}.hide-label.svelte-5hfany{box-shadow:none;border-width:0;background:transparent;overflow:visible}button.svelte-1030q2h{display:flex;justify-content:center;align-items:center;gap:1px;z-index:var(--layer-1);box-shadow:var(--shadow-drop);border:1px solid var(--button-secondary-border-color);border-radius:var(--radius-sm);background:var(--background-fill-primary);padding:2px;color:var(--block-label-text-color)}button.svelte-1030q2h:hover{cursor:pointer;border:2px solid var(--button-secondary-border-color-hover);padding:1px;color:var(--block-label-text-color)}span.svelte-1030q2h{padding:0 1px;font-size:10px}div.svelte-1030q2h{padding:2px;width:14px;height:14px}.pending.svelte-1030q2h{animation:svelte-1030q2h-flash .5s infinite}@keyframes svelte-1030q2h-flash{0%{opacity:.5}50%{opacity:1}to{opacity:.5}}.empty.svelte-lk9eg8{display:flex;justify-content:center;align-items:center;margin-top:calc(0px - var(--size-6));height:var(--size-full)}.icon.svelte-lk9eg8{opacity:.5;height:var(--size-5);color:var(--body-text-color)}.small.svelte-lk9eg8{min-height:calc(var(--size-32) - 20px)}.large.svelte-lk9eg8{min-height:calc(var(--size-64) - 20px)}.unpadded_box.svelte-lk9eg8{margin-top:0}.small_parent.svelte-lk9eg8{min-height:100%!important}.dropdown-arrow.svelte-p5edak{fill:var(--body-text-color);margin-right:var(--size-2);width:var(--size-5)}button.svelte-1jrzxu{display:inline-flex;justify-content:center;align-items:center;transition:var(--button-transition);box-shadow:var(--button-shadow);padding:var(--size-0-5) var(--size-2);text-align:center}button.svelte-1jrzxu:hover,button[disabled].svelte-1jrzxu{box-shadow:var(--button-shadow-hover)}button.svelte-1jrzxu:active{box-shadow:var(--button-shadow-active)}button[disabled].svelte-1jrzxu{opacity:.5;filter:grayscale(30%);cursor:not-allowed}.hide-container.svelte-1jrzxu{display:none}.primary.svelte-1jrzxu{border:var(--button-border-width) solid 
var(--button-primary-border-color);background:var(--button-primary-background-fill);color:var(--button-primary-text-color)}.primary.svelte-1jrzxu:hover,.primary[disabled].svelte-1jrzxu{border-color:var(--button-primary-border-color-hover);background:var(--button-primary-background-fill-hover);color:var(--button-primary-text-color-hover)}.secondary.svelte-1jrzxu{border:var(--button-border-width) solid var(--button-secondary-border-color);background:var(--button-secondary-background-fill);color:var(--button-secondary-text-color)}.secondary.svelte-1jrzxu:hover,.secondary[disabled].svelte-1jrzxu{border-color:var(--button-secondary-border-color-hover);background:var(--button-secondary-background-fill-hover);color:var(--button-secondary-text-color-hover)}.stop.svelte-1jrzxu{border:var(--button-border-width) solid var(--button-cancel-border-color);background:var(--button-cancel-background-fill);color:var(--button-cancel-text-color)}.stop.svelte-1jrzxu:hover,.stop[disabled].svelte-1jrzxu{border-color:var(--button-cancel-border-color-hover);background:var(--button-cancel-background-fill-hover);color:var(--button-cancel-text-color-hover)}.sm.svelte-1jrzxu{border-radius:var(--button-small-radius);padding:var(--button-small-padding);font-weight:var(--button-small-text-weight);font-size:var(--button-small-text-size)}.lg.svelte-1jrzxu{border-radius:var(--button-large-radius);padding:var(--button-large-padding);font-weight:var(--button-large-text-weight);font-size:var(--button-large-text-size)} diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Go Nagai Art Book and Explore the Legendary Works of the Creator of Devilman.md b/spaces/cihyFjudo/fairness-paper-search/Download Go Nagai Art Book and Explore the Legendary Works of the Creator of Devilman.md deleted file mode 100644 index 51f6671c54f21b49140f9fa645a34f39a7831a45..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Download Go Nagai Art Book and Explore the Legendary Works of the Creator of Devilman.md +++ /dev/null @@ -1,18 +0,0 @@ -
    -

    Miura: It's so long ago that I can't even make an approximate estimation. I guess it's around my kindergarten years since I drew for the first time in my life before I entered an elementary school. I really don't remember the very first moment. All I can remember is that I drew manga first on a notebook for university students during the second grade in elementary school.It was some kind of revelation. To please others or to receive praise by drawing was the happiest thing in my youth. I guess "old habits die hard". My family moved quite often at that time. My drawings enabled me to make new friends in the schools I shifted to. Now that I think of it, it was a time when I already established my identity as a drawer in a way. [Laughs.]

    -

    go nagai art book download


    DOWNLOADhttps://tinurli.com/2uwjyE



    -

    Meanwhile I was one of a group of five friends whose goal was to be mangaka. All of them had their own specialty other than drawing manga, like playing the guitar for example. We influenced and introduced each other saying things like "the ongoing movie is enjoyable" or "it's good to read this book"... "Otherwise, you won't be able to be a mangaka"; this represented well how the group was.

    -

    There was a thing that today's high school students can't understand: in my days, friends were also some kind of rivals. So I wanted to look great to the others. What should I do to look great? I had to watch movies and read books. Repeating this, I learned that manga isn't only about drawing. I acquired the ability to conceive a story while I was a university student. Precisely speaking, when I entered for a prize in my university days.

    -

    Miura: Berserk is my very first comic book and anime. So I was very excited, and I wanted to make something good. I could've just let the studio staff do the work, but I gave some advice on the outlines of the character designs. But my main concern was the scripts. They'd send me the scripts and I'd revise them and make changes. I checked all scripts, and made a lot of changes and requests on all of them. I bet the writers hated me.

    -

    Miura: There's no time, so I have to choose materials that are exemplary or whatever. Researching down to the details is impossible unless you go at it using human-wave tactics. Furthermore, in my case I'm focusing more on the author's conclusions as I cursorily read a book for its theme and information. Like, for the witch hunt in the Conviction arc, what kind of thing does the author of these materials think witch hunts were? I read two or three books for the Conviction arc, but what I learned from them was that witch hunts represented an unseen fear in the Middle Ages that people collectively embodied. When people experienced fear, they ended up manifesting it, and a group manifestation would turn into a witch hunt.

    -

    -

    Miura: I depicted witch trials during the Conviction arc, so I was going to end up having to do witches and magic. I went in search of lots of reference books on magic, and amongst them was a book written by someone claiming to be a real magician. It sounds shady at first, but there are magic users overseas and they have the authoritative opinions on the subject. I thought I'd faithfully portray the way a real magician conceptualizes magic. In Japan, the game-like magic where you throw fireballs and such is typical, but naturally there's a concept out there in the world of actual "magic." This gets a little off-topic, but I heard this once in some documentary: "If you want to make a movie that rivals Star Wars, you can't watch Star Wars. Go watch what George Lucas was watching for the purpose of making Star Wars." Follow what's already been depicted, and you might just end up with an inferior copy.

    -

    Miura: It's an unfortunate business, I can't enjoy what I create. I look back at the books as reference material for my writing, but I don't read through them often. I probably won't be able to reflect on my work until many years after the serialization of Berserk is finished.

    -

    Miura: The management and selection of drawings are entirely up to my staff. However, I can assure you that not a single drawing has been skimped on, so I can guarantee the quality of the exhibition. As for myself, I would be embarrassed to hold an exhibition, but ... (laughs). I have one request: since manga artists are mainly engaged in the business of black-and-white manuscripts rather than color, I would like to see the black-and-white manuscripts that I usually draw the most exhibited as much as possible. I would like visitors to enjoy the details of the raw manuscripts, the white corrections, and other aspects of the manuscripts that cannot be seen in the book.

    -

    Miura: It is not that dramatic. The nature of color illustrations for manga is that they are used for book covers and magazine covers, so there is a limit to what can be drawn. The space available is surprisingly limited because of the logo and title, and in the case of a volume, the illustration has to be eye-catching in the bookstore. So I end up filling up most of the screen with characters. Many manga artists try to draw the main characters of the book as large as possible. I have always liked the oil painting style of Frank Frazetta and Noriyoshi Ohrai, so my coloring style is in the same vein as their drawings. In the beginning, I used oil paints, but they dried very slowly, so I painted on canvas by base coating with oil-based paint, and then painted with semi-transparent Liquitex. However, if I tried to create subtle gradations with this method, it would take a long time to paint over and over again. In other words, it was difficult to create skin tones for children and girls and was not suitable for cute characters. I ended up creating drawings of characters with a strong sense of power, like Guts. Nowadays, I can say that this is part of my taste.

    -

    Miura: If manga is continued for a long time, there is bound to be inflation of characters and techniques, but this inflation needs to be controlled. In a weekly serialization, the manga may be drawn with a lot of momentum, and it may become difficult to keep up at the end. That is why I try to keep inflation as low as possible. In Berserk, there are two major inflationary events: the time when magic is introduced, and the time when the berserker armor is introduced. In both cases, the parts of Berserk that we have been focusing on until now are taken to the next stage. When magic appears, the worldview changes, and when Guts moves in an unusual manner in the berserker armor, he deviates one step from the "human physical senses" that we have been depicting until now. We have to be aware of this and create some inflation. The color paintings I just mentioned were done when I felt that the inflation had worked and I had a good response. The calendar drawings and card game drawings that I did with light colors are also memorable because they are different from my usual color drawings. I enjoyed being able to draw scenes of Guts and his friends in their everyday lives without being bound by the restrictions of a book, and for a while, I even drew light-colored pictures on the backs of the magazine's pinups. I wanted to draw travel scenes with these drawings because they are suited to depicting everyday scenes.

    -

    Miura: What sticks in my mind the most is when the manga became a hit when it was first made into an anime (Kenpu Denki Berserk in 1997) and I saw a stack of copies of the manga in a local bookstore. It felt somewhat fluffy and unreal. I think it is a truly lucky thing for a manga artist to have such an experience, and I will never forget it.

    -

    Some quick navigation links: Shop favorites and bestsellers, European imports, surrealism, recent arrivals, gift books, gift cards, graphic design, NYRB, Wakefield Press, Tiger Tateishi, used books, vintage magazines, zines.

    -


    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/ERECTILE DYSFUNCTION How To Get Rock-Solid Erecti Battaglia Nuove Worl HOT.md b/spaces/cihyFjudo/fairness-paper-search/ERECTILE DYSFUNCTION How To Get Rock-Solid Erecti Battaglia Nuove Worl HOT.md deleted file mode 100644 index 0614e03532b03eb5c06b8d9f6042a8453cc05080..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/ERECTILE DYSFUNCTION How To Get Rock-Solid Erecti Battaglia Nuove Worl HOT.md +++ /dev/null @@ -1,7 +0,0 @@ -
    -

    Kernohan, A. F., McIntyre, M., Hughes, D. M., Tam, S. W., Worcel, M., and Reid, J. L. An oral yohimbine/L-arginine combination (NMI 861) for the treatment of male erectile dysfunction: a pharmacokinetic, pharmacodynamic and interaction study with intravenous nitroglycerine in healthy male subjects. Br J Clin.Pharmacol. 2005;59(1):85-93. View abstract.

    -

    Chen J, Wollman Y, Chernichovsky T, et al. Effect of oral administration of high-dose nitric oxide donor L-arginine in men with organic erectile dysfunction: results of a double-blind, randomized, placebo-controlled study. BJU Int 1999;83:269-73. View abstract.

    -

    ERECTILE DYSFUNCTION: How To Get Rock-Solid Erecti battaglia nuove worl


    Download Zip ››››› https://tinurli.com/2uwkVk



    -

    Penile prosthesis (PP) has been an option for males with erectile dysfunction since first introduced in 1973.[1] Since that time, various devices have been implanted with the goal of improving sexual function in men affected by erectile dysfunction (ED). As the worldwide prevalence of ED is expected to increase to over 322 million by 2025, the demand for implantable PP should continue to rise.[2] Associated factors include the aging population as well as a sustained increase in conditions such as obesity, diabetes, and cardiovascular disease that may lead to diminished erectile function.[3]

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Resident Evil Operation Raccoon City Trainer v 1.2.1803.128 Unlock All Weapons and Skills.md b/spaces/cihyFjudo/fairness-paper-search/Resident Evil Operation Raccoon City Trainer v 1.2.1803.128 Unlock All Weapons and Skills.md deleted file mode 100644 index 113754b62c2239650788c54ec31698be1e00fd7c..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Resident Evil Operation Raccoon City Trainer v 1.2.1803.128 Unlock All Weapons and Skills.md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    magix wireless serial
    video pemerkosaan abg
    regedit pes 2012
    666-Welcome Back To The Grave..mp3
    ntfs dos pro 5 full version
    Resident evil operation raccoon city pc trainer 1.2.1803.128 UNLOCKED
    david sanchez discography download torrent
    free download 3gp movie fast furious 6 in hindi 1

    barbie in new york game download
    Lylloo nue star nue fake
    Inksaver Serial 4
    Birdy Nam Nam - Defiant Order (2011)
    microelectronic circuit design jaeger solution manual
    Zero Woman 2005 ( Full Download )
    Daemon Tools Lite 4.35.6 Crack Serial Keygen Rapidshare Full Download.rar
    [Top rated] film indian mandrie si onoare 1
    mathematica 9.0.1 keygen

    Elbow - Asleep In The Back (2001) [128-254 Kbs] by pandaking

    download october 2010 sat pdf
    Xxxnaruto fuq
    Irecmode.rar
    hindi wollywoodvideos.com
    Cant.Be.Sanford.And.Son.Its.A.XXX.Parody.XXX.DVDRip.XviD STA

    ww2 pacific heroes activation code
    w7xe exe.rar
    download para validar windows 8 pro build 9200 permanente
    Steinberg Mastering Edition V1.0 VST
    xara 3d maker 7 serial number
    partition piano yalla calogero
    arma_crack_1.18.rar-adds
    unlock_phone_1.33_crack-adds
    Serial Lotto Sorcerer V7
    windows 8 pro with media center build 9200 activation x86 x64 download

    Nikon Camera Control Pro v2 0 Cracked-LAMA
    malayalam kambi story free download.zip hit

    Registration Key For Reginout-adds

    NEED FOR SPEED RIVALS CRACK
    We chat for nokia c2-01

    Abg jepang di perkosa ramd rame
    win8tp3 setup unique product key-adds
    INFE
    barn yarn game full torrent
    windows 7 professional 64-bit svenska torrent
    rambo 5 movie download
    nokia_5233_spb_shell_latest_version_free_download-adds

    real life pre intermediate test master rapidshare
    Patch francais acdsee pro 6
    Cisco IOS images for Dynamips -Routers.zip
    Love Hina 19 DVD(H264 AAC)[KAA][ mkv
    solidcam 2013 64bit crack
    The Lovely Bones(2009)(promo)(nlsubs)2lions-team
    3gp sawan full movie salman khan free download

    download Son rape while mother sleeps video 3gp
    Marli - Hoje NA?o Tem ManiA§oba (maxi-single).zip
    (b) Omar Galanti - group sex with trans.avi
    Fire In The Valley: Female Genital Massage DVD
    Feu vert 2013 gratuit
    adobe photoshop cc 14 0 final multilingual photoshop x86 x64 cwz
    navigon europe 4.8.0 cracked torrent free
    Indian Hindi Real Maa Bete Ki Sexy Stories
    Poughkeepsie - Debra Anastasia.epub
    ??? ???????????????????? ???????????? ??????????
    ngintip-abg-mandi-di-sungai-3gp.

    -

    resident evil operation raccoon city trainer v 1.2.1803.128


    DOWNLOADhttps://tinurli.com/2uwiLy



    -

    resident evil operation raccoon city trainer v 1.2.1803.128 [url= -hd-online-player-sultan-movie-in-tamil-in-hd] -hd-online-player-sultan-movie-in-tamil-in-hd[/url]studio one 3 license file download [url= -sur-kshetra-episode-3-full-37]Download[/url] Taiseertaids [url= -solucionariovariablecomplejaserieschaummurrayspiegel]trello[/url] melsAtterve [url= -plagiarism-detector-order-reference-number-crack-5]trello[/url] free test 2009 code rousseau maroc telecharger [url= -the-thieves-2012-english-subtitles-torrent-download-links]trello.com[/url] FULL Adobe Illustrator CC 2018 25.2.1 (64-Bit) Crack [url= -free-download-motorola-iden-cns-unlock-ver-7043]trello.com[/url] download free fifa 2006 pc game full version 100 works [url= -autodesk-autocad-2018-836-x86x64-keygen-crack-serial-key-keygenl]trello.com[/url] ReFWocheNuththegodat [url= -emergency4deluxeeditiongerman-fasiso]trello.com[/url]EquantyroarkPata [url= -leoschamrothanintroductiontoelectrocardiographypdf21]trello[/url] flissinneple [url= -2d-truss-analysis-2-0-keygen-24] -2d-truss-analysis-2-0-keygen-24[/url]

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/MicImagePlugin.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/MicImagePlugin.py deleted file mode 100644 index 801318930d515426a186a7524f25ef7c342dec7a..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/MicImagePlugin.py +++ /dev/null @@ -1,103 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Microsoft Image Composer support for PIL -# -# Notes: -# uses TiffImagePlugin.py to read the actual image streams -# -# History: -# 97-01-20 fl Created -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1997. -# -# See the README file for information on usage and redistribution. -# - - -import olefile - -from . import Image, TiffImagePlugin - -# -# -------------------------------------------------------------------- - - -def _accept(prefix): - return prefix[:8] == olefile.MAGIC - - -## -# Image plugin for Microsoft's Image Composer file format. - - -class MicImageFile(TiffImagePlugin.TiffImageFile): - format = "MIC" - format_description = "Microsoft Image Composer" - _close_exclusive_fp_after_loading = False - - def _open(self): - # read the OLE directory and see if this is a likely - # to be a Microsoft Image Composer file - - try: - self.ole = olefile.OleFileIO(self.fp) - except OSError as e: - msg = "not an MIC file; invalid OLE file" - raise SyntaxError(msg) from e - - # find ACI subfiles with Image members (maybe not the - # best way to identify MIC files, but what the... ;-) - - self.images = [] - for path in self.ole.listdir(): - if path[1:] and path[0][-4:] == ".ACI" and path[1] == "Image": - self.images.append(path) - - # if we didn't find any images, this is probably not - # an MIC file. - if not self.images: - msg = "not an MIC file; no image entries" - raise SyntaxError(msg) - - self.frame = None - self._n_frames = len(self.images) - self.is_animated = self._n_frames > 1 - - self.seek(0) - - def seek(self, frame): - if not self._seek_check(frame): - return - try: - filename = self.images[frame] - except IndexError as e: - msg = "no such frame" - raise EOFError(msg) from e - - self.fp = self.ole.openstream(filename) - - TiffImagePlugin.TiffImageFile._open(self) - - self.frame = frame - - def tell(self): - return self.frame - - def close(self): - self.ole.close() - super().close() - - def __exit__(self, *args): - self.ole.close() - super().__exit__() - - -# -# -------------------------------------------------------------------- - -Image.register_open(MicImageFile.format, MicImageFile, _accept) - -Image.register_extension(MicImageFile.format, ".mic") diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/MspImagePlugin.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/MspImagePlugin.py deleted file mode 100644 index c6567b2ae626fd83ef21575a59374c922d5392a9..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/MspImagePlugin.py +++ /dev/null @@ -1,194 +0,0 @@ -# -# The Python Imaging Library. -# -# MSP file handling -# -# This is the format used by the Paint program in Windows 1 and 2. -# -# History: -# 95-09-05 fl Created -# 97-01-03 fl Read/write MSP images -# 17-02-21 es Fixed RLE interpretation -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1995-97. -# Copyright (c) Eric Soroos 2017. 
-# -# See the README file for information on usage and redistribution. -# -# More info on this format: https://archive.org/details/gg243631 -# Page 313: -# Figure 205. Windows Paint Version 1: "DanM" Format -# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03 -# -# See also: https://www.fileformat.info/format/mspaint/egff.htm - -import io -import struct - -from . import Image, ImageFile -from ._binary import i16le as i16 -from ._binary import o16le as o16 - -# -# read MSP files - - -def _accept(prefix): - return prefix[:4] in [b"DanM", b"LinS"] - - -## -# Image plugin for Windows MSP images. This plugin supports both -# uncompressed (Windows 1.0). - - -class MspImageFile(ImageFile.ImageFile): - format = "MSP" - format_description = "Windows Paint" - - def _open(self): - # Header - s = self.fp.read(32) - if not _accept(s): - msg = "not an MSP file" - raise SyntaxError(msg) - - # Header checksum - checksum = 0 - for i in range(0, 32, 2): - checksum = checksum ^ i16(s, i) - if checksum != 0: - msg = "bad MSP checksum" - raise SyntaxError(msg) - - self.mode = "1" - self._size = i16(s, 4), i16(s, 6) - - if s[:4] == b"DanM": - self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))] - else: - self.tile = [("MSP", (0, 0) + self.size, 32, None)] - - -class MspDecoder(ImageFile.PyDecoder): - # The algo for the MSP decoder is from - # https://www.fileformat.info/format/mspaint/egff.htm - # cc-by-attribution -- That page references is taken from the - # Encyclopedia of Graphics File Formats and is licensed by - # O'Reilly under the Creative Common/Attribution license - # - # For RLE encoded files, the 32byte header is followed by a scan - # line map, encoded as one 16bit word of encoded byte length per - # line. - # - # NOTE: the encoded length of the line can be 0. This was not - # handled in the previous version of this encoder, and there's no - # mention of how to handle it in the documentation. From the few - # examples I've seen, I've assumed that it is a fill of the - # background color, in this case, white. 
- # - # - # Pseudocode of the decoder: - # Read a BYTE value as the RunType - # If the RunType value is zero - # Read next byte as the RunCount - # Read the next byte as the RunValue - # Write the RunValue byte RunCount times - # If the RunType value is non-zero - # Use this value as the RunCount - # Read and write the next RunCount bytes literally - # - # e.g.: - # 0x00 03 ff 05 00 01 02 03 04 - # would yield the bytes: - # 0xff ff ff 00 01 02 03 04 - # - # which are then interpreted as a bit packed mode '1' image - - _pulls_fd = True - - def decode(self, buffer): - img = io.BytesIO() - blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8)) - try: - self.fd.seek(32) - rowmap = struct.unpack_from( - f"<{self.state.ysize}H", self.fd.read(self.state.ysize * 2) - ) - except struct.error as e: - msg = "Truncated MSP file in row map" - raise OSError(msg) from e - - for x, rowlen in enumerate(rowmap): - try: - if rowlen == 0: - img.write(blank_line) - continue - row = self.fd.read(rowlen) - if len(row) != rowlen: - msg = f"Truncated MSP file, expected {rowlen} bytes on row {x}" - raise OSError(msg) - idx = 0 - while idx < rowlen: - runtype = row[idx] - idx += 1 - if runtype == 0: - (runcount, runval) = struct.unpack_from("Bc", row, idx) - img.write(runval * runcount) - idx += 2 - else: - runcount = runtype - img.write(row[idx : idx + runcount]) - idx += runcount - - except struct.error as e: - msg = f"Corrupted MSP file in row {x}" - raise OSError(msg) from e - - self.set_as_raw(img.getvalue(), ("1", 0, 1)) - - return -1, 0 - - -Image.register_decoder("MSP", MspDecoder) - - -# -# write MSP files (uncompressed only) - - -def _save(im, fp, filename): - if im.mode != "1": - msg = f"cannot write mode {im.mode} as MSP" - raise OSError(msg) - - # create MSP header - header = [0] * 16 - - header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 - header[2], header[3] = im.size - header[4], header[5] = 1, 1 - header[6], header[7] = 1, 1 - header[8], header[9] = im.size - - checksum = 0 - for h in header: - checksum = checksum ^ h - header[12] = checksum # FIXME: is this the right field? 
- - # header - for h in header: - fp.write(o16(h)) - - # image body - ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 32, ("1", 0, 1))]) - - -# -# registry - -Image.register_open(MspImageFile.format, MspImageFile, _accept) -Image.register_save(MspImageFile.format, _save) - -Image.register_extension(MspImageFile.format, ".msp") diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/applications.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/applications.py deleted file mode 100644 index e32cfa03d20cbfd8ee588b943d15cf1b38e2b951..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/applications.py +++ /dev/null @@ -1,942 +0,0 @@ -from enum import Enum -from typing import ( - Any, - Awaitable, - Callable, - Coroutine, - Dict, - List, - Optional, - Sequence, - Type, - TypeVar, - Union, -) - -from fastapi import routing -from fastapi.datastructures import Default, DefaultPlaceholder -from fastapi.exception_handlers import ( - http_exception_handler, - request_validation_exception_handler, - websocket_request_validation_exception_handler, -) -from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError -from fastapi.logger import logger -from fastapi.middleware.asyncexitstack import AsyncExitStackMiddleware -from fastapi.openapi.docs import ( - get_redoc_html, - get_swagger_ui_html, - get_swagger_ui_oauth2_redirect_html, -) -from fastapi.openapi.utils import get_openapi -from fastapi.params import Depends -from fastapi.types import DecoratedCallable, IncEx -from fastapi.utils import generate_unique_id -from starlette.applications import Starlette -from starlette.datastructures import State -from starlette.exceptions import HTTPException -from starlette.middleware import Middleware -from starlette.middleware.base import BaseHTTPMiddleware -from starlette.middleware.errors import ServerErrorMiddleware -from starlette.middleware.exceptions import ExceptionMiddleware -from starlette.requests import Request -from starlette.responses import HTMLResponse, JSONResponse, Response -from starlette.routing import BaseRoute -from starlette.types import ASGIApp, Lifespan, Receive, Scope, Send - -AppType = TypeVar("AppType", bound="FastAPI") - - -class FastAPI(Starlette): - def __init__( - self: AppType, - *, - debug: bool = False, - routes: Optional[List[BaseRoute]] = None, - title: str = "FastAPI", - summary: Optional[str] = None, - description: str = "", - version: str = "0.1.0", - openapi_url: Optional[str] = "/openapi.json", - openapi_tags: Optional[List[Dict[str, Any]]] = None, - servers: Optional[List[Dict[str, Union[str, Any]]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - default_response_class: Type[Response] = Default(JSONResponse), - redirect_slashes: bool = True, - docs_url: Optional[str] = "/docs", - redoc_url: Optional[str] = "/redoc", - swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect", - swagger_ui_init_oauth: Optional[Dict[str, Any]] = None, - middleware: Optional[Sequence[Middleware]] = None, - exception_handlers: Optional[ - Dict[ - Union[int, Type[Exception]], - Callable[[Request, Any], Coroutine[Any, Any, Response]], - ] - ] = None, - on_startup: Optional[Sequence[Callable[[], Any]]] = None, - on_shutdown: Optional[Sequence[Callable[[], Any]]] = None, - lifespan: Optional[Lifespan[AppType]] = None, - terms_of_service: Optional[str] = None, - contact: Optional[Dict[str, Union[str, 
Any]]] = None, - license_info: Optional[Dict[str, Union[str, Any]]] = None, - openapi_prefix: str = "", - root_path: str = "", - root_path_in_servers: bool = True, - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - callbacks: Optional[List[BaseRoute]] = None, - webhooks: Optional[routing.APIRouter] = None, - deprecated: Optional[bool] = None, - include_in_schema: bool = True, - swagger_ui_parameters: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - **extra: Any, - ) -> None: - self.debug = debug - self.title = title - self.summary = summary - self.description = description - self.version = version - self.terms_of_service = terms_of_service - self.contact = contact - self.license_info = license_info - self.openapi_url = openapi_url - self.openapi_tags = openapi_tags - self.root_path_in_servers = root_path_in_servers - self.docs_url = docs_url - self.redoc_url = redoc_url - self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url - self.swagger_ui_init_oauth = swagger_ui_init_oauth - self.swagger_ui_parameters = swagger_ui_parameters - self.servers = servers or [] - self.extra = extra - self.openapi_version = "3.1.0" - self.openapi_schema: Optional[Dict[str, Any]] = None - if self.openapi_url: - assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'" - assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'" - # TODO: remove when discarding the openapi_prefix parameter - if openapi_prefix: - logger.warning( - '"openapi_prefix" has been deprecated in favor of "root_path", which ' - "follows more closely the ASGI standard, is simpler, and more " - "automatic. Check the docs at " - "https://fastapi.tiangolo.com/advanced/sub-applications/" - ) - self.webhooks = webhooks or routing.APIRouter() - self.root_path = root_path or openapi_prefix - self.state: State = State() - self.dependency_overrides: Dict[Callable[..., Any], Callable[..., Any]] = {} - self.router: routing.APIRouter = routing.APIRouter( - routes=routes, - redirect_slashes=redirect_slashes, - dependency_overrides_provider=self, - on_startup=on_startup, - on_shutdown=on_shutdown, - lifespan=lifespan, - default_response_class=default_response_class, - dependencies=dependencies, - callbacks=callbacks, - deprecated=deprecated, - include_in_schema=include_in_schema, - responses=responses, - generate_unique_id_function=generate_unique_id_function, - ) - self.exception_handlers: Dict[ - Any, Callable[[Request, Any], Union[Response, Awaitable[Response]]] - ] = ({} if exception_handlers is None else dict(exception_handlers)) - self.exception_handlers.setdefault(HTTPException, http_exception_handler) - self.exception_handlers.setdefault( - RequestValidationError, request_validation_exception_handler - ) - self.exception_handlers.setdefault( - WebSocketRequestValidationError, - # Starlette still has incorrect type specification for the handlers - websocket_request_validation_exception_handler, # type: ignore - ) - - self.user_middleware: List[Middleware] = ( - [] if middleware is None else list(middleware) - ) - self.middleware_stack: Union[ASGIApp, None] = None - self.setup() - - def build_middleware_stack(self) -> ASGIApp: - # Duplicate/override from Starlette to add AsyncExitStackMiddleware - # inside of ExceptionMiddleware, inside of custom user middlewares - debug = self.debug - error_handler = None - exception_handlers = {} - - for key, value in self.exception_handlers.items(): - if 
key in (500, Exception): - error_handler = value - else: - exception_handlers[key] = value - - middleware = ( - [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)] - + self.user_middleware - + [ - Middleware( - ExceptionMiddleware, handlers=exception_handlers, debug=debug - ), - # Add FastAPI-specific AsyncExitStackMiddleware for dependencies with - # contextvars. - # This needs to happen after user middlewares because those create a - # new contextvars context copy by using a new AnyIO task group. - # The initial part of dependencies with yield is executed in the - # FastAPI code, inside all the middlewares, but the teardown part - # (after yield) is executed in the AsyncExitStack in this middleware, - # if the AsyncExitStack lived outside of the custom middlewares and - # contextvars were set in a dependency with yield in that internal - # contextvars context, the values would not be available in the - # outside context of the AsyncExitStack. - # By putting the middleware and the AsyncExitStack here, inside all - # user middlewares, the code before and after yield in dependencies - # with yield is executed in the same contextvars context, so all values - # set in contextvars before yield is still available after yield as - # would be expected. - # Additionally, by having this AsyncExitStack here, after the - # ExceptionMiddleware, now dependencies can catch handled exceptions, - # e.g. HTTPException, to customize the teardown code (e.g. DB session - # rollback). - Middleware(AsyncExitStackMiddleware), - ] - ) - - app = self.router - for cls, options in reversed(middleware): - app = cls(app=app, **options) - return app - - def openapi(self) -> Dict[str, Any]: - if not self.openapi_schema: - self.openapi_schema = get_openapi( - title=self.title, - version=self.version, - openapi_version=self.openapi_version, - summary=self.summary, - description=self.description, - terms_of_service=self.terms_of_service, - contact=self.contact, - license_info=self.license_info, - routes=self.routes, - webhooks=self.webhooks.routes, - tags=self.openapi_tags, - servers=self.servers, - ) - return self.openapi_schema - - def setup(self) -> None: - if self.openapi_url: - urls = (server_data.get("url") for server_data in self.servers) - server_urls = {url for url in urls if url} - - async def openapi(req: Request) -> JSONResponse: - root_path = req.scope.get("root_path", "").rstrip("/") - if root_path not in server_urls: - if root_path and self.root_path_in_servers: - self.servers.insert(0, {"url": root_path}) - server_urls.add(root_path) - return JSONResponse(self.openapi()) - - self.add_route(self.openapi_url, openapi, include_in_schema=False) - if self.openapi_url and self.docs_url: - - async def swagger_ui_html(req: Request) -> HTMLResponse: - root_path = req.scope.get("root_path", "").rstrip("/") - openapi_url = root_path + self.openapi_url - oauth2_redirect_url = self.swagger_ui_oauth2_redirect_url - if oauth2_redirect_url: - oauth2_redirect_url = root_path + oauth2_redirect_url - return get_swagger_ui_html( - openapi_url=openapi_url, - title=self.title + " - Swagger UI", - oauth2_redirect_url=oauth2_redirect_url, - init_oauth=self.swagger_ui_init_oauth, - swagger_ui_parameters=self.swagger_ui_parameters, - ) - - self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False) - - if self.swagger_ui_oauth2_redirect_url: - - async def swagger_ui_redirect(req: Request) -> HTMLResponse: - return get_swagger_ui_oauth2_redirect_html() - - self.add_route( - 
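The middleware-ordering comment above has a practical consequence for application code: because the AsyncExitStack runs inside ExceptionMiddleware, the code after yield in a dependency executes in the same contextvars context and can react to handled exceptions such as HTTPException. A minimal sketch of that pattern, assuming a made-up FakeSession stand-in rather than a real database API:

```python
from fastapi import Depends, FastAPI, HTTPException

app = FastAPI()

class FakeSession:  # stand-in for a DB session, illustrative only
    def commit(self) -> None: ...
    def rollback(self) -> None: ...

async def get_session():
    session = FakeSession()
    try:
        yield session
        session.commit()    # teardown runs in the same contextvars context as setup
    except HTTPException:
        session.rollback()  # teardown can react to handled exceptions
        raise

@app.get("/items/{item_id}")
async def read_item(item_id: int, session: FakeSession = Depends(get_session)):
    if item_id == 0:
        raise HTTPException(status_code=404, detail="Not found")
    return {"item_id": item_id}
```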
self.swagger_ui_oauth2_redirect_url, - swagger_ui_redirect, - include_in_schema=False, - ) - if self.openapi_url and self.redoc_url: - - async def redoc_html(req: Request) -> HTMLResponse: - root_path = req.scope.get("root_path", "").rstrip("/") - openapi_url = root_path + self.openapi_url - return get_redoc_html( - openapi_url=openapi_url, title=self.title + " - ReDoc" - ) - - self.add_route(self.redoc_url, redoc_html, include_in_schema=False) - - async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: - if self.root_path: - scope["root_path"] = self.root_path - await super().__call__(scope, receive, send) - - def add_api_route( - self, - path: str, - endpoint: Callable[..., Coroutine[Any, Any, Response]], - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - methods: Optional[List[str]] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Union[Type[Response], DefaultPlaceholder] = Default( - JSONResponse - ), - name: Optional[str] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> None: - self.router.add_api_route( - path, - endpoint=endpoint, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - methods=methods, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def api_route( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - methods: Optional[List[str]] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: 
Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - def decorator(func: DecoratedCallable) -> DecoratedCallable: - self.router.add_api_route( - path, - func, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - methods=methods, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - return func - - return decorator - - def add_api_websocket_route( - self, - path: str, - endpoint: Callable[..., Any], - name: Optional[str] = None, - *, - dependencies: Optional[Sequence[Depends]] = None, - ) -> None: - self.router.add_api_websocket_route( - path, - endpoint, - name=name, - dependencies=dependencies, - ) - - def websocket( - self, - path: str, - name: Optional[str] = None, - *, - dependencies: Optional[Sequence[Depends]] = None, - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - def decorator(func: DecoratedCallable) -> DecoratedCallable: - self.add_api_websocket_route( - path, - func, - name=name, - dependencies=dependencies, - ) - return func - - return decorator - - def include_router( - self, - router: routing.APIRouter, - *, - prefix: str = "", - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - include_in_schema: bool = True, - default_response_class: Type[Response] = Default(JSONResponse), - callbacks: Optional[List[BaseRoute]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> None: - self.router.include_router( - router, - prefix=prefix, - tags=tags, - dependencies=dependencies, - responses=responses, - deprecated=deprecated, - include_in_schema=include_in_schema, - default_response_class=default_response_class, - callbacks=callbacks, - generate_unique_id_function=generate_unique_id_function, - ) - - def get( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: 
bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.get( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def put( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.put( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def post( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: 
Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.post( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def delete( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.delete( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - 
openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def options( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.options( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def head( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.head( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - 
response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def patch( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.patch( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def trace( - self, - path: str, - *, - response_model: Any = Default(None), - status_code: Optional[int] = None, - tags: Optional[List[Union[str, Enum]]] = None, - dependencies: Optional[Sequence[Depends]] = None, - summary: Optional[str] = None, - description: Optional[str] = None, - response_description: str = "Successful Response", - responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, - deprecated: Optional[bool] = None, - operation_id: Optional[str] = None, - response_model_include: Optional[IncEx] = None, - response_model_exclude: Optional[IncEx] = None, - response_model_by_alias: bool = True, - response_model_exclude_unset: bool = False, - response_model_exclude_defaults: bool = False, - response_model_exclude_none: bool = False, - include_in_schema: bool = True, - response_class: Type[Response] = Default(JSONResponse), - name: Optional[str] = None, - callbacks: Optional[List[BaseRoute]] = None, - openapi_extra: Optional[Dict[str, Any]] = None, - generate_unique_id_function: 
Callable[[routing.APIRoute], str] = Default( - generate_unique_id - ), - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.trace( - path, - response_model=response_model, - status_code=status_code, - tags=tags, - dependencies=dependencies, - summary=summary, - description=description, - response_description=response_description, - responses=responses, - deprecated=deprecated, - operation_id=operation_id, - response_model_include=response_model_include, - response_model_exclude=response_model_exclude, - response_model_by_alias=response_model_by_alias, - response_model_exclude_unset=response_model_exclude_unset, - response_model_exclude_defaults=response_model_exclude_defaults, - response_model_exclude_none=response_model_exclude_none, - include_in_schema=include_in_schema, - response_class=response_class, - name=name, - callbacks=callbacks, - openapi_extra=openapi_extra, - generate_unique_id_function=generate_unique_id_function, - ) - - def websocket_route( - self, path: str, name: Union[str, None] = None - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - def decorator(func: DecoratedCallable) -> DecoratedCallable: - self.router.add_websocket_route(path, func, name=name) - return func - - return decorator - - def on_event( - self, event_type: str - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - return self.router.on_event(event_type) - - def middleware( - self, middleware_type: str - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - def decorator(func: DecoratedCallable) -> DecoratedCallable: - self.add_middleware(BaseHTTPMiddleware, dispatch=func) - return func - - return decorator - - def exception_handler( - self, exc_class_or_status_code: Union[int, Type[Exception]] - ) -> Callable[[DecoratedCallable], DecoratedCallable]: - def decorator(func: DecoratedCallable) -> DecoratedCallable: - self.add_exception_handler(exc_class_or_status_code, func) - return func - - return decorator diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/av1_metadata_bsf.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/av1_metadata_bsf.c deleted file mode 100644 index 41b02cc836b14d419012562816f53d42ae208f9c..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/av1_metadata_bsf.c +++ /dev/null @@ -1,231 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/common.h" -#include "libavutil/opt.h" - -#include "bsf.h" -#include "bsf_internal.h" -#include "cbs.h" -#include "cbs_bsf.h" -#include "cbs_av1.h" - -typedef struct AV1MetadataContext { - CBSBSFContext common; - - int td; - AV1RawOBU td_obu; - - int color_primaries; - int transfer_characteristics; - int matrix_coefficients; - - int color_range; - int chroma_sample_position; - - AVRational tick_rate; - int num_ticks_per_picture; - - int delete_padding; -} AV1MetadataContext; - - -static int av1_metadata_update_sequence_header(AVBSFContext *bsf, - AV1RawSequenceHeader *seq) -{ - AV1MetadataContext *ctx = bsf->priv_data; - AV1RawColorConfig *clc = &seq->color_config; - AV1RawTimingInfo *tim = &seq->timing_info; - - if (ctx->color_primaries >= 0 || - ctx->transfer_characteristics >= 0 || - ctx->matrix_coefficients >= 0) { - clc->color_description_present_flag = 1; - - if (ctx->color_primaries >= 0) - clc->color_primaries = ctx->color_primaries; - if (ctx->transfer_characteristics >= 0) - clc->transfer_characteristics = ctx->transfer_characteristics; - if (ctx->matrix_coefficients >= 0) - clc->matrix_coefficients = ctx->matrix_coefficients; - } - - if (ctx->color_range >= 0) { - if (clc->color_primaries == AVCOL_PRI_BT709 && - clc->transfer_characteristics == AVCOL_TRC_IEC61966_2_1 && - clc->matrix_coefficients == AVCOL_SPC_RGB) { - av_log(bsf, AV_LOG_WARNING, "Warning: color_range cannot be set " - "on RGB streams encoded in BT.709 sRGB.\n"); - } else { - clc->color_range = ctx->color_range; - } - } - - if (ctx->chroma_sample_position >= 0) { - if (clc->mono_chrome || !clc->subsampling_x || !clc->subsampling_y) { - av_log(bsf, AV_LOG_WARNING, "Warning: chroma_sample_position " - "can only be set for 4:2:0 streams.\n"); - } else { - clc->chroma_sample_position = ctx->chroma_sample_position; - } - } - - if (ctx->tick_rate.num && ctx->tick_rate.den) { - int num, den; - - av_reduce(&num, &den, ctx->tick_rate.num, ctx->tick_rate.den, - UINT32_MAX > INT_MAX ? UINT32_MAX : INT_MAX); - - tim->time_scale = num; - tim->num_units_in_display_tick = den; - seq->timing_info_present_flag = 1; - - if (ctx->num_ticks_per_picture > 0) { - tim->equal_picture_interval = 1; - tim->num_ticks_per_picture_minus_1 = - ctx->num_ticks_per_picture - 1; - } - } - - return 0; -} - -static int av1_metadata_update_fragment(AVBSFContext *bsf, AVPacket *pkt, - CodedBitstreamFragment *frag) -{ - AV1MetadataContext *ctx = bsf->priv_data; - int err, i; - - for (i = 0; i < frag->nb_units; i++) { - if (frag->units[i].type == AV1_OBU_SEQUENCE_HEADER) { - AV1RawOBU *obu = frag->units[i].content; - err = av1_metadata_update_sequence_header(bsf, &obu->obu.sequence_header); - if (err < 0) - return err; - } - } - - // If a Temporal Delimiter is present, it must be the first OBU. 
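The tick_rate handling above reduces the user-supplied rational with av_reduce() and maps it onto the sequence header's timing_info fields. A small Python sketch of that mapping, using fractions.Fraction in place of av_reduce() and omitting its INT_MAX clamping:

```python
from fractions import Fraction

def timing_info_from_tick_rate(num: int, den: int, num_ticks_per_picture: int = -1) -> dict:
    r = Fraction(num, den)  # reduce to lowest terms, as av_reduce() does
    info = {
        "timing_info_present_flag": 1,
        "time_scale": r.numerator,
        "num_units_in_display_tick": r.denominator,
    }
    if num_ticks_per_picture > 0:  # constant frame rate requested
        info["equal_picture_interval"] = 1
        info["num_ticks_per_picture_minus_1"] = num_ticks_per_picture - 1
    return info

print(timing_info_from_tick_rate(30000, 1001))
# {'timing_info_present_flag': 1, 'time_scale': 30000, 'num_units_in_display_tick': 1001}
```

In practice the filter is applied during a stream copy, with something like: ffmpeg -i in.mkv -c:v copy -bsf:v av1_metadata=tick_rate=30000/1001:color_range=pc out.mkv.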
- if (frag->nb_units && frag->units[0].type == AV1_OBU_TEMPORAL_DELIMITER) { - if (ctx->td == BSF_ELEMENT_REMOVE) - ff_cbs_delete_unit(frag, 0); - } else if (pkt && ctx->td == BSF_ELEMENT_INSERT) { - err = ff_cbs_insert_unit_content(frag, 0, AV1_OBU_TEMPORAL_DELIMITER, - &ctx->td_obu, NULL); - if (err < 0) { - av_log(bsf, AV_LOG_ERROR, "Failed to insert Temporal Delimiter.\n"); - return err; - } - } - - if (ctx->delete_padding) { - for (i = frag->nb_units - 1; i >= 0; i--) { - if (frag->units[i].type == AV1_OBU_PADDING) - ff_cbs_delete_unit(frag, i); - } - } - - return 0; -} - -static const CBSBSFType av1_metadata_type = { - .codec_id = AV_CODEC_ID_AV1, - .fragment_name = "temporal unit", - .unit_name = "OBU", - .update_fragment = &av1_metadata_update_fragment, -}; - -static int av1_metadata_init(AVBSFContext *bsf) -{ - AV1MetadataContext *ctx = bsf->priv_data; - - ctx->td_obu = (AV1RawOBU) { - .header.obu_type = AV1_OBU_TEMPORAL_DELIMITER, - }; - - return ff_cbs_bsf_generic_init(bsf, &av1_metadata_type); -} - -#define OFFSET(x) offsetof(AV1MetadataContext, x) -#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_BSF_PARAM) -static const AVOption av1_metadata_options[] = { - BSF_ELEMENT_OPTIONS_PIR("td", "Temporal Delimiter OBU", - td, FLAGS), - - { "color_primaries", "Set color primaries (section 6.4.2)", - OFFSET(color_primaries), AV_OPT_TYPE_INT, - { .i64 = -1 }, -1, 255, FLAGS }, - { "transfer_characteristics", "Set transfer characteristics (section 6.4.2)", - OFFSET(transfer_characteristics), AV_OPT_TYPE_INT, - { .i64 = -1 }, -1, 255, FLAGS }, - { "matrix_coefficients", "Set matrix coefficients (section 6.4.2)", - OFFSET(matrix_coefficients), AV_OPT_TYPE_INT, - { .i64 = -1 }, -1, 255, FLAGS }, - - { "color_range", "Set color range flag (section 6.4.2)", - OFFSET(color_range), AV_OPT_TYPE_INT, - { .i64 = -1 }, -1, 1, FLAGS, "cr" }, - { "tv", "TV (limited) range", 0, AV_OPT_TYPE_CONST, - { .i64 = 0 }, .flags = FLAGS, .unit = "cr" }, - { "pc", "PC (full) range", 0, AV_OPT_TYPE_CONST, - { .i64 = 1 }, .flags = FLAGS, .unit = "cr" }, - - { "chroma_sample_position", "Set chroma sample position (section 6.4.2)", - OFFSET(chroma_sample_position), AV_OPT_TYPE_INT, - { .i64 = -1 }, -1, 3, FLAGS, "csp" }, - { "unknown", "Unknown chroma sample position", 0, AV_OPT_TYPE_CONST, - { .i64 = AV1_CSP_UNKNOWN }, .flags = FLAGS, .unit = "csp" }, - { "vertical", "Left chroma sample position", 0, AV_OPT_TYPE_CONST, - { .i64 = AV1_CSP_VERTICAL }, .flags = FLAGS, .unit = "csp" }, - { "colocated", "Top-left chroma sample position", 0, AV_OPT_TYPE_CONST, - { .i64 = AV1_CSP_COLOCATED }, .flags = FLAGS, .unit = "csp" }, - - { "tick_rate", "Set display tick rate (time_scale / num_units_in_display_tick)", - OFFSET(tick_rate), AV_OPT_TYPE_RATIONAL, - { .dbl = 0.0 }, 0, UINT_MAX, FLAGS }, - { "num_ticks_per_picture", "Set display ticks per picture for CFR streams", - OFFSET(num_ticks_per_picture), AV_OPT_TYPE_INT, - { .i64 = -1 }, -1, INT_MAX, FLAGS }, - - { "delete_padding", "Delete all Padding OBUs", - OFFSET(delete_padding), AV_OPT_TYPE_BOOL, - { .i64 = 0 }, 0, 1, FLAGS}, - - { NULL } -}; - -static const AVClass av1_metadata_class = { - .class_name = "av1_metadata_bsf", - .item_name = av_default_item_name, - .option = av1_metadata_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -static const enum AVCodecID av1_metadata_codec_ids[] = { - AV_CODEC_ID_AV1, AV_CODEC_ID_NONE, -}; - -const FFBitStreamFilter ff_av1_metadata_bsf = { - .p.name = "av1_metadata", - .p.codec_ids = av1_metadata_codec_ids, - .p.priv_class = 
&av1_metadata_class, - .priv_data_size = sizeof(AV1MetadataContext), - .init = &av1_metadata_init, - .close = &ff_cbs_bsf_generic_close, - .filter = &ff_cbs_bsf_generic_filter, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hpeldsp.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hpeldsp.h deleted file mode 100644 index 45e81b10a59c15e0ec49c24cdf084a634f2598b6..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hpeldsp.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Half-pel DSP functions. - * Copyright (c) 2000, 2001, 2002 Fabrice Bellard - * Copyright (c) 2002-2004 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Half-pel DSP functions. - */ - -#ifndef AVCODEC_HPELDSP_H -#define AVCODEC_HPELDSP_H - -#include <stddef.h> -#include <stdint.h> - -/* add and put pixel (decoding) */ -// blocksizes for hpel_pixels_func are 8x4,8x8 16x8 16x16 -// h for hpel_pixels_func is limited to {width/2, width} but never larger -// than 16 and never smaller than 4 -typedef void (*op_pixels_func)(uint8_t *block /*align width (8 or 16)*/, - const uint8_t *pixels /*align 1*/, - ptrdiff_t line_size, int h); - -/** - * Half-pel DSP context. - */ -typedef struct HpelDSPContext { - /** - * Halfpel motion compensation with rounding (a+b+1)>>1. - * this is an array[4][4] of motion compensation functions for 4 - * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
    - * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] - * @param block destination where the result is stored - * @param pixels source - * @param line_size number of bytes in a horizontal line of block - * @param h height - */ - op_pixels_func put_pixels_tab[4][4]; - - /** - * Halfpel motion compensation with rounding (a+b+1)>>1. - * This is an array[4][4] of motion compensation functions for 4 - * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
    - * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] - * @param block destination into which the result is averaged (a+b+1)>>1 - * @param pixels source - * @param line_size number of bytes in a horizontal line of block - * @param h height - */ - op_pixels_func avg_pixels_tab[4][4]; - - /** - * Halfpel motion compensation with no rounding (a+b)>>1. - * this is an array[4][4] of motion compensation functions for 2 - * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
    - * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] - * @param block destination where the result is stored - * @param pixels source - * @param line_size number of bytes in a horizontal line of block - * @param h height - * @note The size is kept at [4][4] to match the above pixel_tabs and avoid - * out of bounds reads in the motion estimation code. - */ - op_pixels_func put_no_rnd_pixels_tab[4][4]; - - /** - * Halfpel motion compensation with no rounding (a+b)>>1. - * this is an array[4] of motion compensation functions for 1 - * horizontal blocksize (16) and the 4 halfpel positions<br>
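Each entry in these tables interpolates one of the four halfpel positions, indexed as xhalfpel + 2*yhalfpel, and the only difference between the rounding and no-rounding variants is the bias term. A scalar Python sketch of the horizontal-half "put" case (table index 1), purely illustrative of what the optimized versions compute per pixel:

```python
def put_pixels_x2(block, pixels, line_size, h, w, rnd=True):
    """Halfpel position (xhalf=1, yhalf=0): average each pixel with its right
    neighbour; rnd selects (a+b+1)>>1 (put/avg tabs) vs (a+b)>>1 (no_rnd tabs)."""
    bias = 1 if rnd else 0
    for y in range(h):
        for x in range(w):
            a = pixels[y * line_size + x]
            b = pixels[y * line_size + x + 1]
            block[y * line_size + x] = (a + b + bias) >> 1
```

The avg_* tables perform the same interpolation but then average the result into the existing destination pixels instead of overwriting them.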
    - * *pixels_tab[0][ xhalfpel + 2*yhalfpel ] - * @param block destination into which the result is averaged (a+b)>>1 - * @param pixels source - * @param line_size number of bytes in a horizontal line of block - * @param h height - */ - op_pixels_func avg_no_rnd_pixels_tab[4]; -} HpelDSPContext; - -void ff_hpeldsp_init(HpelDSPContext *c, int flags); - -void ff_hpeldsp_init_aarch64(HpelDSPContext *c, int flags); -void ff_hpeldsp_init_alpha(HpelDSPContext *c, int flags); -void ff_hpeldsp_init_arm(HpelDSPContext *c, int flags); -void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags); -void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags); -void ff_hpeldsp_init_mips(HpelDSPContext *c, int flags); -void ff_hpeldsp_init_loongarch(HpelDSPContext *c, int flags); - -#endif /* AVCODEC_HPELDSP_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libopus.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libopus.h deleted file mode 100644 index a8223d1d6f1df6fe5867d9e31f97061b9053011f..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libopus.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * libopus encoder/decoder common code - * Copyright (c) 2012 Nicolas George - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_LIBOPUS_H -#define AVCODEC_LIBOPUS_H - -int ff_opus_error_to_averror(int err); - -#endif /* AVCODEC_LIBOPUS_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevcpred_msa.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevcpred_msa.c deleted file mode 100644 index b056ee986bb06dacdcf2a6c458840a694ec4c06a..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevcpred_msa.c +++ /dev/null @@ -1,3077 +0,0 @@ -/* - * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavcodec/hevcdec.h" -#include "libavutil/mips/generic_macros_msa.h" -#include "hevcpred_mips.h" - -static const int8_t intra_pred_angle_up[17] = { - -32, -26, -21, -17, -13, -9, -5, -2, 0, 2, 5, 9, 13, 17, 21, 26, 32 -}; - -static const int8_t intra_pred_angle_low[16] = { - 32, 26, 21, 17, 13, 9, 5, 2, 0, -2, -5, -9, -13, -17, -21, -26 -}; - -#define HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, \ - mul_val_h0, mul_val_h1, mul_val_h2, mul_val_h3, \ - res0, res1, mul_val_b0, mul_val_b1, round) \ -{ \ - v8i16 res0_m, res1_m, res2_m, res3_m; \ - \ - MUL4(mul_val_h0, vec0, mul_val_h2, vec0, mul_val_h0, vec1, \ - mul_val_h2, vec1, res0_m, res1_m, res2_m, res3_m); \ - \ - res0_m += mul_val_h1 * tmp0; \ - res1_m += mul_val_h3 * tmp0; \ - res2_m += mul_val_h1 * tmp0; \ - res3_m += mul_val_h3 * tmp0; \ - \ - res0_m += mul_val_b0 * src0_r; \ - res1_m += mul_val_b0 * src0_l; \ - res2_m += (mul_val_b0 - 1) * src0_r; \ - res3_m += (mul_val_b0 - 1) * src0_l; \ - \ - res0_m += mul_val_b1 * tmp1; \ - res1_m += mul_val_b1 * tmp1; \ - res2_m += (mul_val_b1 + 1) * tmp1; \ - res3_m += (mul_val_b1 + 1) * tmp1; \ - \ - SRARI_H4_SH(res0_m, res1_m, res2_m, res3_m, round); \ - PCKEV_B2_SH(res1_m, res0_m, res3_m, res2_m, res0, res1); \ -} - -static void hevc_intra_pred_vert_4x4_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint32_t col; - uint32_t src_data; - v8i16 vec0, vec1, vec2; - v16i8 zero = { 0 }; - - src_data = LW(src_top); - SW4(src_data, src_data, src_data, src_data, dst, stride); - - if (0 == flag) { - src_data = LW(src_left); - - vec2 = (v8i16) __msa_insert_w((v4i32) vec2, 0, src_data); - - vec0 = __msa_fill_h(src_left[-1]); - vec1 = __msa_fill_h(src_top[0]); - - vec2 = (v8i16) __msa_ilvr_b(zero, (v16i8) vec2); - vec2 -= vec0; - vec2 >>= 1; - vec2 += vec1; - CLIP_SH_0_255(vec2); - - for (col = 0; col < 4; col++) { - dst[stride * col] = (uint8_t) vec2[col]; - } - } -} - -static void hevc_intra_pred_vert_8x8_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint8_t *tmp_dst = dst; - uint32_t row; - uint16_t val0, val1, val2, val3; - uint64_t src_data1; - v8i16 vec0, vec1, vec2; - v16i8 zero = { 0 }; - - src_data1 = LD(src_top); - - for (row = 8; row--;) { - SD(src_data1, tmp_dst); - tmp_dst += stride; - } - - if (0 == flag) { - src_data1 = LD(src_left); - - vec2 = (v8i16) __msa_insert_d((v2i64) zero, 0, src_data1); - - vec0 = __msa_fill_h(src_left[-1]); - vec1 = __msa_fill_h(src_top[0]); - - vec2 = (v8i16) __msa_ilvr_b(zero, (v16i8) vec2); - vec2 -= vec0; - vec2 >>= 1; - vec2 += vec1; - CLIP_SH_0_255(vec2); - - val0 = vec2[0]; - val1 = vec2[1]; - val2 = vec2[2]; - val3 = vec2[3]; - - dst[0] = val0; - dst[stride] = val1; - dst[2 * stride] = val2; - dst[3 * stride] = val3; - - val0 = vec2[4]; - val1 = vec2[5]; - val2 = vec2[6]; - val3 = vec2[7]; - - dst[4 * stride] = val0; - dst[5 * stride] = val1; - dst[6 * stride] = val2; - dst[7 * stride] = val3; - } -} - -static void hevc_intra_pred_vert_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - int32_t col; - uint8_t *tmp_dst = dst; - uint32_t row; - v16u8 src; - v8i16 vec0, vec1, vec2, vec3; - - src = LD_UB(src_top); - - for 
(row = 16; row--;) { - ST_UB(src, tmp_dst); - tmp_dst += stride; - } - - if (0 == flag) { - src = LD_UB(src_left); - - vec0 = __msa_fill_h(src_left[-1]); - vec1 = __msa_fill_h(src_top[0]); - - UNPCK_UB_SH(src, vec2, vec3); - SUB2(vec2, vec0, vec3, vec0, vec2, vec3); - - vec2 >>= 1; - vec3 >>= 1; - - ADD2(vec2, vec1, vec3, vec1, vec2, vec3); - CLIP_SH2_0_255(vec2, vec3); - - src = (v16u8) __msa_pckev_b((v16i8) vec3, (v16i8) vec2); - - for (col = 0; col < 16; col++) { - dst[stride * col] = src[col]; - } - } -} - -static void hevc_intra_pred_horiz_4x4_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint32_t val0, val1, val2, val3; - v16i8 src0; - v8i16 src0_r, src_top_val, src_left_val; - v16i8 zero = { 0 }; - - val0 = src_left[0] * 0x01010101; - val1 = src_left[1] * 0x01010101; - val2 = src_left[2] * 0x01010101; - val3 = src_left[3] * 0x01010101; - SW4(val0, val1, val2, val3, dst, stride); - - if (0 == flag) { - val0 = LW(src_top); - src0 = (v16i8) __msa_insert_w((v4i32) src0, 0, val0); - src_top_val = __msa_fill_h(src_top[-1]); - src_left_val = __msa_fill_h(src_left[0]); - - src0_r = (v8i16) __msa_ilvr_b(zero, src0); - - src0_r -= src_top_val; - src0_r >>= 1; - src0_r += src_left_val; - CLIP_SH_0_255(src0_r); - src0 = __msa_pckev_b((v16i8) src0_r, (v16i8) src0_r); - val0 = __msa_copy_s_w((v4i32) src0, 0); - SW(val0, dst); - } -} - -static void hevc_intra_pred_horiz_8x8_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint64_t val0, val1, val2, val3; - v16i8 src0; - v8i16 src0_r, src_top_val, src_left_val; - v16i8 zero = { 0 }; - - val0 = src_left[0] * 0x0101010101010101; - val1 = src_left[1] * 0x0101010101010101; - val2 = src_left[2] * 0x0101010101010101; - val3 = src_left[3] * 0x0101010101010101; - SD4(val0, val1, val2, val3, dst, stride); - - val0 = src_left[4] * 0x0101010101010101; - val1 = src_left[5] * 0x0101010101010101; - val2 = src_left[6] * 0x0101010101010101; - val3 = src_left[7] * 0x0101010101010101; - SD4(val0, val1, val2, val3, dst + 4 * stride, stride); - - if (0 == flag) { - val0 = LD(src_top); - src0 = (v16i8) __msa_insert_d((v2i64) src0, 0, val0); - src_top_val = __msa_fill_h(src_top[-1]); - src_left_val = __msa_fill_h(src_left[0]); - - src0_r = (v8i16) __msa_ilvr_b(zero, src0); - - src0_r -= src_top_val; - src0_r >>= 1; - src0_r += src_left_val; - CLIP_SH_0_255(src0_r); - src0 = __msa_pckev_b((v16i8) src0_r, (v16i8) src0_r); - val0 = __msa_copy_s_d((v2i64) src0, 0); - SD(val0, dst); - } -} - -static void hevc_intra_pred_horiz_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint8_t *tmp_dst = dst; - uint32_t row; - uint8_t inp0, inp1, inp2, inp3; - v16i8 src0, src1, src2, src3; - v8i16 src0_r, src0_l, src_left_val, src_top_val; - - src_left_val = __msa_fill_h(src_left[0]); - - for (row = 4; row--;) { - inp0 = src_left[0]; - inp1 = src_left[1]; - inp2 = src_left[2]; - inp3 = src_left[3]; - src_left += 4; - - src0 = __msa_fill_b(inp0); - src1 = __msa_fill_b(inp1); - src2 = __msa_fill_b(inp2); - src3 = __msa_fill_b(inp3); - - ST_SB4(src0, src1, src2, src3, tmp_dst, stride); - tmp_dst += (4 * stride); - } - - if (0 == flag) { - src0 = LD_SB(src_top); - src_top_val = __msa_fill_h(src_top[-1]); - - UNPCK_UB_SH(src0, src0_r, src0_l); - SUB2(src0_r, src_top_val, src0_l, src_top_val, src0_r, src0_l); - - src0_r >>= 1; - src0_l >>= 1; - - ADD2(src0_r, src_left_val, src0_l, src_left_val, src0_r, 
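Stripped of the MSA vector plumbing, the vertical predictors above copy the top reference row into every row of the block and, when flag == 0 (small luma blocks), smooth the first column against the left neighbours; the horizontal predictors are the transposed case. A scalar sketch, illustrative only:

```python
def clip255(v: int) -> int:
    return max(0, min(255, v))

def hevc_pred_vertical(top, left, top_left, size, filter_edge=True):
    """Copy top[] down each row; optionally filter column 0 as in the flag == 0 path."""
    dst = [[top[x] for x in range(size)] for _ in range(size)]
    if filter_edge:
        for y in range(size):
            dst[y][0] = clip255(top[0] + ((left[y] - top_left) >> 1))
    return dst
```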
src0_l); - CLIP_SH2_0_255(src0_r, src0_l); - src0 = __msa_pckev_b((v16i8) src0_l, (v16i8) src0_r); - ST_SB(src0, dst); - } -} - -static void hevc_intra_pred_horiz_32x32_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride) -{ - uint32_t row; - uint8_t inp0, inp1, inp2, inp3; - v16i8 src0, src1, src2, src3; - - for (row = 0; row < 8; row++) { - inp0 = src_left[row * 4]; - inp1 = src_left[row * 4 + 1]; - inp2 = src_left[row * 4 + 2]; - inp3 = src_left[row * 4 + 3]; - - src0 = __msa_fill_b(inp0); - src1 = __msa_fill_b(inp1); - src2 = __msa_fill_b(inp2); - src3 = __msa_fill_b(inp3); - - ST_SB2(src0, src0, dst, 16); - dst += stride; - ST_SB2(src1, src1, dst, 16); - dst += stride; - ST_SB2(src2, src2, dst, 16); - dst += stride; - ST_SB2(src3, src3, dst, 16); - dst += stride; - } -} - -static void hevc_intra_pred_dc_4x4_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint8_t *tmp_dst = dst; - uint32_t addition = 0; - uint32_t val0, val1, val2; - v16i8 src = { 0 }; - v16u8 store; - v16i8 zero = { 0 }; - v8u16 sum, vec0, vec1; - - val0 = LW(src_top); - val1 = LW(src_left); - INSERT_W2_SB(val0, val1, src); - sum = __msa_hadd_u_h((v16u8) src, (v16u8) src); - sum = (v8u16) __msa_hadd_u_w(sum, sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_srari_w((v4i32) sum, 3); - addition = __msa_copy_u_w((v4i32) sum, 0); - store = (v16u8) __msa_fill_b(addition); - val0 = __msa_copy_u_w((v4i32) store, 0); - SW4(val0, val0, val0, val0, dst, stride) - - if (0 == flag) { - ILVR_B2_UH(zero, store, zero, src, vec0, vec1); - - vec1 += vec0; - vec0 += vec0; - vec1 += vec0; - - vec1 = (v8u16) __msa_srari_h((v8i16) vec1, 2); - store = (v16u8) __msa_pckev_b((v16i8) vec1, (v16i8) vec1); - val1 = (src_left[0] + 2 * addition + src_top[0] + 2) >> 2; - store = (v16u8) __msa_insert_b((v16i8) store, 0, val1); - val0 = __msa_copy_u_w((v4i32) store, 0); - SW(val0, tmp_dst); - - val0 = src_left[1]; - val1 = src_left[2]; - val2 = src_left[3]; - - addition *= 3; - - ADD2(val0, addition, val1, addition, val0, val1); - val2 += addition; - - val0 += 2; - val1 += 2; - val2 += 2; - val0 >>= 2; - val1 >>= 2; - val2 >>= 2; - - tmp_dst[stride * 1] = val0; - tmp_dst[stride * 2] = val1; - tmp_dst[stride * 3] = val2; - } -} - -static void hevc_intra_pred_dc_8x8_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint8_t *tmp_dst = dst; - uint32_t row, col, val; - uint32_t addition = 0; - uint64_t val0, val1; - v16u8 src = { 0 }; - v16u8 store; - v8u16 sum, vec0, vec1; - v16i8 zero = { 0 }; - - val0 = LD(src_top); - val1 = LD(src_left); - INSERT_D2_UB(val0, val1, src); - sum = __msa_hadd_u_h((v16u8) src, (v16u8) src); - sum = (v8u16) __msa_hadd_u_w(sum, sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_pckev_w((v4i32) sum, (v4i32) sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_srari_w((v4i32) sum, 4); - addition = __msa_copy_u_w((v4i32) sum, 0); - store = (v16u8) __msa_fill_b(addition); - val0 = __msa_copy_u_d((v2i64) store, 0); - - for (row = 8; row--;) { - SD(val0, dst); - dst += stride; - } - - if (0 == flag) { - ILVR_B2_UH(zero, store, zero, src, vec0, vec1); - - vec1 += vec0; - vec0 += vec0; - vec1 += vec0; - vec1 = (v8u16) __msa_srari_h((v8i16) vec1, 2); - store = (v16u8) __msa_pckev_b((v16i8) vec1, (v16i8) vec1); - val = (src_left[0] + 2 * addition + src_top[0] + 2) >> 2; - store = 
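The DC predictors above fill the block with the rounded mean of the top and left reference samples and, in the filtered case (flag == 0), blend the first row and column toward those references. A scalar reference sketch of the same arithmetic:

```python
def hevc_pred_dc(top, left, size, filter_edges=True):
    """size is a power of two, so size.bit_length() equals log2(size) + 1."""
    dc = (sum(top[:size]) + sum(left[:size]) + size) >> size.bit_length()
    dst = [[dc] * size for _ in range(size)]
    if filter_edges:  # flag == 0 above
        dst[0][0] = (left[0] + 2 * dc + top[0] + 2) >> 2
        for x in range(1, size):
            dst[0][x] = (top[x] + 3 * dc + 2) >> 2
        for y in range(1, size):
            dst[y][0] = (left[y] + 3 * dc + 2) >> 2
    return dst
```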
(v16u8) __msa_insert_b((v16i8) store, 0, val); - val0 = __msa_copy_u_d((v2i64) store, 0); - SD(val0, tmp_dst); - - val0 = LD(src_left); - src = (v16u8) __msa_insert_d((v2i64) src, 0, val0); - vec1 = (v8u16) __msa_ilvr_b(zero, (v16i8) src); - vec0 = (v8u16) __msa_fill_h(addition); - vec0 *= 3; - vec1 += vec0; - vec1 = (v8u16) __msa_srari_h((v8i16) vec1, 2); - - for (col = 1; col < 8; col++) { - tmp_dst[stride * col] = vec1[col]; - } - } -} - -static void hevc_intra_pred_dc_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - int32_t flag) -{ - uint8_t *tmp_dst = dst; - uint32_t row, col, val; - uint32_t addition = 0; - v16u8 src_above1, store, src_left1; - v8u16 sum, sum_above, sum_left; - v8u16 vec0, vec1, vec2; - v16i8 zero = { 0 }; - - src_above1 = LD_UB(src_top); - src_left1 = LD_UB(src_left); - - HADD_UB2_UH(src_above1, src_left1, sum_above, sum_left); - sum = sum_above + sum_left; - sum = (v8u16) __msa_hadd_u_w(sum, sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_pckev_w((v4i32) sum, (v4i32) sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_srari_w((v4i32) sum, 5); - addition = __msa_copy_u_w((v4i32) sum, 0); - store = (v16u8) __msa_fill_b(addition); - - for (row = 16; row--;) { - ST_UB(store, dst); - dst += stride; - } - - if (0 == flag) { - vec0 = (v8u16) __msa_ilvr_b(zero, (v16i8) store); - ILVRL_B2_UH(zero, src_above1, vec1, vec2); - ADD2(vec1, vec0, vec2, vec0, vec1, vec2); - vec0 += vec0; - ADD2(vec1, vec0, vec2, vec0, vec1, vec2); - SRARI_H2_UH(vec1, vec2, 2); - store = (v16u8) __msa_pckev_b((v16i8) vec2, (v16i8) vec1); - val = (src_left[0] + 2 * addition + src_top[0] + 2) >> 2; - store = (v16u8) __msa_insert_b((v16i8) store, 0, val); - ST_UB(store, tmp_dst); - - ILVRL_B2_UH(zero, src_left1, vec1, vec2); - vec0 = (v8u16) __msa_fill_h(addition); - vec0 *= 3; - ADD2(vec1, vec0, vec2, vec0, vec1, vec2); - SRARI_H2_UH(vec1, vec2, 2); - store = (v16u8) __msa_pckev_b((v16i8) vec2, (v16i8) vec1); - - for (col = 1; col < 16; col++) { - tmp_dst[stride * col] = store[col]; - } - } -} - -static void hevc_intra_pred_dc_32x32_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride) -{ - uint32_t row; - v16u8 src_above1, src_above2, store, src_left1, src_left2; - v8u16 sum_above1, sum_above2; - v8u16 sum_left1, sum_left2; - v8u16 sum, sum_above, sum_left; - - LD_UB2(src_top, 16, src_above1, src_above2); - LD_UB2(src_left, 16, src_left1, src_left2); - HADD_UB2_UH(src_above1, src_above2, sum_above1, sum_above2); - HADD_UB2_UH(src_left1, src_left2, sum_left1, sum_left2); - sum_above = sum_above1 + sum_above2; - sum_left = sum_left1 + sum_left2; - sum = sum_above + sum_left; - sum = (v8u16) __msa_hadd_u_w(sum, sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_pckev_w((v4i32) sum, (v4i32) sum); - sum = (v8u16) __msa_hadd_u_d((v4u32) sum, (v4u32) sum); - sum = (v8u16) __msa_srari_w((v4i32) sum, 6); - store = (v16u8) __msa_splati_b((v16i8) sum, 0); - - for (row = 16; row--;) { - ST_UB2(store, store, dst, 16); - dst += stride; - ST_UB2(store, store, dst, 16); - dst += stride; - } -} - -static void hevc_intra_pred_plane_4x4_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride) -{ - uint32_t src0, src1; - v16i8 src_vec0, src_vec1; - v8i16 src_vec0_r, src1_r, tmp0, tmp1, mul_val1; - v8i16 vec0, vec1, vec2, vec3, res0, res1, res2, res3; - v8i16 mul_val0 = { 3, 2, 1, 0, 1, 2, 3, 4 }; - v16i8 
zero = { 0 }; - - src0 = LW(src_top); - src1 = LW(src_left); - - mul_val1 = (v8i16) __msa_pckod_d((v2i64) mul_val0, (v2i64) mul_val0); - - src_vec0 = (v16i8) __msa_insert_w((v4i32) zero, 0, src0); - src_vec1 = (v16i8) __msa_insert_w((v4i32) zero, 0, src1); - - ILVR_B2_SH(zero, src_vec0, zero, src_vec1, src_vec0_r, src1_r); - SPLATI_H4_SH(src1_r, 0, 1, 2, 3, vec0, vec1, vec2, vec3); - - tmp0 = __msa_fill_h(src_top[4]); - tmp1 = __msa_fill_h(src_left[4]); - - MUL4(mul_val0, vec0, mul_val0, vec1, mul_val0, vec2, mul_val0, vec3, - res0, res1, res2, res3); - - res0 += mul_val1 * tmp0; - res1 += mul_val1 * tmp0; - res2 += mul_val1 * tmp0; - res3 += mul_val1 * tmp0; - - res0 += 3 * src_vec0_r; - res1 += 2 * src_vec0_r; - res2 += src_vec0_r; - res0 += tmp1; - res1 += 2 * tmp1; - res2 += 3 * tmp1; - res3 += 4 * tmp1; - - PCKEV_D2_SH(res1, res0, res3, res2, res0, res1); - SRARI_H2_SH(res0, res1, 3); - src_vec0 = __msa_pckev_b((v16i8) res1, (v16i8) res0); - ST_W4(src_vec0, 0, 1, 2, 3, dst, stride); -} - -static void hevc_intra_pred_plane_8x8_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride) -{ - uint64_t src0, src1; - v16i8 src_vec0, src_vec1, src_vec2, src_vec3; - v8i16 src_vec0_r, src_vec1_r; - v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8i16 res0, res1, res2, res3, res4, res5, res6, res7; - v8i16 tmp0, tmp1, tmp2; - v8i16 mul_val1 = { 1, 2, 3, 4, 5, 6, 7, 8 }; - v8i16 mul_val0 = { 7, 6, 5, 4, 3, 2, 1, 0 }; - v16i8 zero = { 0 }; - - src0 = LD(src_top); - src1 = LD(src_left); - - src_vec0 = (v16i8) __msa_insert_d((v2i64) zero, 0, src0); - src_vec1 = (v16i8) __msa_insert_d((v2i64) zero, 0, src1); - - ILVR_B2_SH(zero, src_vec0, zero, src_vec1, src_vec0_r, src_vec1_r); - SPLATI_H4_SH(src_vec1_r, 0, 1, 2, 3, vec0, vec1, vec2, vec3); - SPLATI_H4_SH(src_vec1_r, 4, 5, 6, 7, vec4, vec5, vec6, vec7); - - tmp0 = __msa_fill_h(src_top[8]); - tmp1 = __msa_fill_h(src_left[8]); - - MUL4(mul_val0, vec0, mul_val0, vec1, mul_val0, vec2, mul_val0, vec3, - res0, res1, res2, res3); - MUL4(mul_val0, vec4, mul_val0, vec5, mul_val0, vec6, mul_val0, vec7, - res4, res5, res6, res7); - - tmp2 = mul_val1 * tmp0; - res0 += tmp2; - res1 += tmp2; - res2 += tmp2; - res3 += tmp2; - res4 += tmp2; - res5 += tmp2; - res6 += tmp2; - res7 += tmp2; - - res0 += 7 * src_vec0_r; - res1 += 6 * src_vec0_r; - res2 += 5 * src_vec0_r; - res3 += 4 * src_vec0_r; - res4 += 3 * src_vec0_r; - res5 += 2 * src_vec0_r; - res6 += src_vec0_r; - - res0 += tmp1; - res1 += 2 * tmp1; - res2 += 3 * tmp1; - res3 += 4 * tmp1; - res4 += 5 * tmp1; - res5 += 6 * tmp1; - res6 += 7 * tmp1; - res7 += 8 * tmp1; - - SRARI_H4_SH(res0, res1, res2, res3, 4); - SRARI_H4_SH(res4, res5, res6, res7, 4); - PCKEV_B4_SB(res1, res0, res3, res2, res5, res4, res7, res6, - src_vec0, src_vec1, src_vec2, src_vec3); - - ST_D8(src_vec0, src_vec1, src_vec2, src_vec3, 0, 1, 0, 1, - 0, 1, 0, 1, dst, stride); -} - -static void hevc_intra_pred_plane_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride) -{ - v16u8 src0, src1; - v8i16 src0_r, src1_r, src0_l, src1_l; - v8i16 vec0, vec1; - v8i16 res0, res1, tmp0, tmp1; - v8i16 mul_val2, mul_val3; - v8i16 mul_val1 = { 1, 2, 3, 4, 5, 6, 7, 8 }; - v8i16 mul_val0 = { 15, 14, 13, 12, 11, 10, 9, 8 }; - - src0 = LD_UB(src_top); - src1 = LD_UB(src_left); - - UNPCK_UB_SH(src0, src0_r, src0_l); - UNPCK_UB_SH(src1, src1_r, src1_l); - - mul_val2 = mul_val0 - 8; - mul_val3 = mul_val1 + 8; - - tmp0 = __msa_fill_h(src_top[16]); - tmp1 = __msa_fill_h(src_left[16]); - - 
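-    /* Reference (scalar) form of the HEVC planar prediction that the
-     * HEVC_PRED_PLANAR_16x2 stages below vectorize for a 16x16 block
-     * (this is the standard spec formula; the exact per-row handling is
-     * inside the macro, which is defined elsewhere):
-     *   pred(x, y) = ((15 - x) * left[y] + (x + 1) * src_top[16] +
-     *                 (15 - y) * top[x]  + (y + 1) * src_left[16] + 16) >> 5;
-     * mul_val0/mul_val2 carry the (15 - x) column weights, mul_val1/mul_val3
-     * the (x + 1) weights, the per-stage constant pairs (15,1), (13,3), ...
-     * track the (15 - y)/(y + 1) row weights, and 5 is the rounding shift
-     * (log2(16) + 1). */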
SPLATI_H2_SH(src1_r, 0, 1, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 15, 1, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 2, 3, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 13, 3, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 4, 5, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 11, 5, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 6, 7, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 9, 7, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 0, 1, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 7, 9, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 2, 3, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 5, 11, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 4, 5, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 3, 13, 5); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 6, 7, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 1, 15, 5); - ST_SH2(res0, res1, dst, stride); -} - -static void process_intra_upper_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - uint8_t offset) -{ - v16i8 src0, src1; - v8i16 src0_r, src1_r, src0_l, src1_l; - v8i16 vec0, vec1, res0, res1; - v8i16 tmp0, tmp1; - v8i16 mul_val2, mul_val3; - v8i16 mul_val1 = { 1, 2, 3, 4, 5, 6, 7, 8 }; - v8i16 mul_val0 = { 31, 30, 29, 28, 27, 26, 25, 24 }; - - tmp0 = __msa_fill_h(src_top[32 - offset]); - tmp1 = __msa_fill_h(src_left[32]); - - src0 = LD_SB(src_top); - src1 = LD_SB(src_left); - - UNPCK_UB_SH(src0, src0_r, src0_l); - UNPCK_UB_SH(src1, src1_r, src1_l); - - mul_val1 += offset; - mul_val0 -= offset; - mul_val2 = mul_val0 - 8; - mul_val3 = mul_val1 + 8; - - SPLATI_H2_SH(src1_r, 0, 1, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 31, 1, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 2, 3, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 29, 3, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 4, 5, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 27, 5, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 6, 7, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 25, 7, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 0, 1, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, 
src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 23, 9, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 2, 3, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 21, 11, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 4, 5, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 19, 13, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 6, 7, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 17, 15, 6); - ST_SH2(res0, res1, dst, stride); -} - -static void process_intra_lower_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride, - uint8_t offset) -{ - v16i8 src0, src1; - v8i16 src0_r, src1_r, src0_l, src1_l; - v8i16 vec0, vec1, res0, res1, tmp0, tmp1; - v8i16 mul_val2, mul_val3; - v8i16 mul_val1 = { 1, 2, 3, 4, 5, 6, 7, 8 }; - v8i16 mul_val0 = { 31, 30, 29, 28, 27, 26, 25, 24 }; - - tmp0 = __msa_fill_h(src_top[32 - offset]); - tmp1 = __msa_fill_h(src_left[16]); - - src0 = LD_SB(src_top); - src1 = LD_SB(src_left); - - UNPCK_UB_SH(src0, src0_r, src0_l); - UNPCK_UB_SH(src1, src1_r, src1_l); - - mul_val1 += offset; - mul_val0 -= offset; - mul_val2 = mul_val0 - 8; - mul_val3 = mul_val1 + 8; - - SPLATI_H2_SH(src1_r, 0, 1, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 15, 17, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 2, 3, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 13, 19, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 4, 5, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 11, 21, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_r, 6, 7, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 9, 23, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 0, 1, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 7, 25, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 2, 3, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 5, 27, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 4, 5, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 3, 29, 6); - ST_SH2(res0, res1, dst, stride); - dst += (2 * stride); - - SPLATI_H2_SH(src1_l, 6, 7, vec0, vec1); - HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, - mul_val0, mul_val1, mul_val2, mul_val3, - res0, res1, 1, 31, 6); - ST_SH2(res0, res1, dst, stride); -} - -static void hevc_intra_pred_plane_32x32_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t stride) -{ - 
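-    /* The 32x32 planar block is built from four 16x16 quadrant passes:
-     * the two "upper" calls cover rows 0-15 (left half, then right half
-     * with a column offset of 16), then dst and src_left advance by 16
-     * rows and the two "lower" calls cover rows 16-31.  The helpers use a
-     * rounding shift of 6, i.e. log2(32) + 1. */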
process_intra_upper_16x16_msa(src_top, src_left, dst, stride, 0); - process_intra_upper_16x16_msa((src_top + 16), src_left, - (dst + 16), stride, 16); - dst += (16 * stride); - src_left += 16; - - process_intra_lower_16x16_msa(src_top, src_left, dst, stride, 0); - process_intra_lower_16x16_msa((src_top + 16), src_left, - (dst + 16), stride, 16); -} - -static void hevc_intra_pred_angular_upper_4width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -256, -315, -390, -482, -630, -910, -1638, -4096 }; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 4; - const uint8_t *ref; - int32_t last; - int32_t h_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t idx2, fact_val2, idx3, fact_val3; - int32_t angle, angle_loop; - int32_t inv_angle_val, offset; - uint64_t tmp0; - v16i8 top0, top1, top2, top3; - v16i8 dst_val0; - v16i8 zero = { 0 }; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 fact0, fact1, fact2, fact3, fact4, fact5, fact6, fact7; - - angle = intra_pred_angle_up[mode - 18]; - inv_angle_val = inv_angle[mode - 18]; - last = (angle) >> 3; - angle_loop = angle; - - ref = src_top - 1; - if (angle < 0 && last < -1) { - inv_angle_val = inv_angle[mode - 18]; - - tmp0 = LD(ref); - SD(tmp0, ref_tmp); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = -1 + ((h_cnt * inv_angle_val + 128) >> 8); - ref_tmp[h_cnt] = src_left[offset]; - } - - ref = ref_tmp; - } - - idx0 = angle_loop >> 5; - fact_val0 = angle_loop & 31; - angle_loop += angle; - - idx1 = angle_loop >> 5; - fact_val1 = angle_loop & 31; - angle_loop += angle; - - idx2 = angle_loop >> 5; - fact_val2 = angle_loop & 31; - angle_loop += angle; - - idx3 = angle_loop >> 5; - fact_val3 = angle_loop & 31; - - top0 = LD_SB(ref + idx0 + 1); - top1 = LD_SB(ref + idx1 + 1); - top2 = LD_SB(ref + idx2 + 1); - top3 = LD_SB(ref + idx3 + 1); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - - fact4 = __msa_fill_h(fact_val2); - fact5 = __msa_fill_h(32 - fact_val2); - - fact6 = __msa_fill_h(fact_val3); - fact7 = __msa_fill_h(32 - fact_val3); - - ILVR_D2_SH(fact2, fact0, fact6, fact4, fact0, fact2); - ILVR_D2_SH(fact3, fact1, fact7, fact5, fact1, fact3); - ILVR_B4_SH(zero, top0, zero, top1, zero, top2, zero, top3, - diff0, diff2, diff4, diff6); - SLDI_B4_SH(zero, diff0, zero, diff2, zero, diff4, zero, diff6, 2, - diff1, diff3, diff5, diff7); - ILVR_D2_SH(diff2, diff0, diff6, diff4, diff0, diff2); - ILVR_D2_SH(diff3, diff1, diff7, diff5, diff1, diff3); - MUL2(diff1, fact0, diff3, fact2, diff1, diff3); - - diff1 += diff0 * fact1; - diff3 += diff2 * fact3; - - SRARI_H2_SH(diff1, diff3, 5); - dst_val0 = __msa_pckev_b((v16i8) diff3, (v16i8) diff1); - ST_W4(dst_val0, 0, 1, 2, 3, dst, stride); -} - -static void hevc_intra_pred_angular_upper_8width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -256, -315, -390, -482, -630, -910, -1638, -4096 }; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 8; - const uint8_t *ref; - const uint8_t *src_left_tmp = src_left - 1; - int32_t last, offset; - int32_t h_cnt, v_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t idx2, fact_val2, idx3, fact_val3; - int32_t angle, angle_loop; - int32_t inv_angle_val, inv_angle_val_loop; - int32_t tmp0, tmp1, tmp2; - v16i8 top0, top1, top2, top3; - v16u8 
dst_val0, dst_val1; - v8i16 fact0, fact1, fact2, fact3, fact4, fact5, fact6, fact7; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - - angle = intra_pred_angle_up[mode - 18]; - inv_angle_val = inv_angle[mode - 18]; - last = (angle) >> 2; - angle_loop = angle; - - ref = src_top - 1; - if (last < -1) { - inv_angle_val_loop = inv_angle_val * last; - - tmp0 = LW(ref); - tmp1 = LW(ref + 4); - tmp2 = LW(ref + 8); - SW(tmp0, ref_tmp); - SW(tmp1, ref_tmp + 4); - SW(tmp2, ref_tmp + 8); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = (inv_angle_val_loop + 128) >> 8; - ref_tmp[h_cnt] = src_left_tmp[offset]; - inv_angle_val_loop += inv_angle_val; - } - ref = ref_tmp; - } - - for (v_cnt = 0; v_cnt < 2; v_cnt++) { - idx0 = (angle_loop) >> 5; - fact_val0 = (angle_loop) & 31; - angle_loop += angle; - - idx1 = (angle_loop) >> 5; - fact_val1 = (angle_loop) & 31; - angle_loop += angle; - - idx2 = (angle_loop) >> 5; - fact_val2 = (angle_loop) & 31; - angle_loop += angle; - - idx3 = (angle_loop) >> 5; - fact_val3 = (angle_loop) & 31; - angle_loop += angle; - - top0 = LD_SB(ref + idx0 + 1); - top1 = LD_SB(ref + idx1 + 1); - top2 = LD_SB(ref + idx2 + 1); - top3 = LD_SB(ref + idx3 + 1); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - fact4 = __msa_fill_h(fact_val2); - fact5 = __msa_fill_h(32 - fact_val2); - fact6 = __msa_fill_h(fact_val3); - fact7 = __msa_fill_h(32 - fact_val3); - - UNPCK_UB_SH(top0, diff0, diff1); - UNPCK_UB_SH(top1, diff2, diff3); - UNPCK_UB_SH(top2, diff4, diff5); - UNPCK_UB_SH(top3, diff6, diff7); - - SLDI_B4_SH(diff1, diff0, diff3, diff2, diff5, diff4, diff7, diff6, 2, - diff1, diff3, diff5, diff7); - MUL4(diff1, fact0, diff3, fact2, diff5, fact4, diff7, fact6, - diff1, diff3, diff5, diff7); - - diff1 += diff0 * fact1; - diff3 += diff2 * fact3; - diff5 += diff4 * fact5; - diff7 += diff6 * fact7; - - SRARI_H4_SH(diff1, diff3, diff5, diff7, 5); - PCKEV_B2_UB(diff3, diff1, diff7, diff5, dst_val0, dst_val1); - ST_D4(dst_val0, dst_val1, 0, 1, 0, 1, dst, stride); - dst += (4 * stride); - } -} - -static void hevc_intra_pred_angular_upper_16width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -256, -315, -390, -482, -630, -910, -1638, -4096 }; - int32_t h_cnt, v_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t idx2, fact_val2, idx3, fact_val3; - int32_t tmp0; - int32_t angle, angle_loop, offset; - int32_t inv_angle_val, inv_angle_val_loop; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 16; - const uint8_t *ref; - const uint8_t *src_left_tmp = src_left - 1; - int32_t last; - v16u8 top0, top1, top2, top3, top4, top5, top6, top7; - v16i8 dst0, dst1, dst2, dst3; - v8i16 fact0, fact1, fact2, fact3, fact4, fact5, fact6, fact7; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 diff8, diff9, diff10, diff11, diff12, diff13, diff14, diff15; - - angle = intra_pred_angle_up[mode - 18]; - inv_angle_val = inv_angle[mode - 18]; - last = angle >> 1; - angle_loop = angle; - - ref = src_top - 1; - if (last < -1) { - inv_angle_val_loop = inv_angle_val * last; - - top0 = LD_UB(ref); - tmp0 = LW(ref + 16); - ST_UB(top0, ref_tmp); - SW(tmp0, ref_tmp + 16); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = (inv_angle_val_loop + 128) >> 8; - ref_tmp[h_cnt] = src_left_tmp[offset]; - inv_angle_val_loop += inv_angle_val; - } - ref = ref_tmp; - } - - for (v_cnt = 
4; v_cnt--;) { - idx0 = (angle_loop) >> 5; - fact_val0 = (angle_loop) & 31; - angle_loop += angle; - - idx1 = (angle_loop) >> 5; - fact_val1 = (angle_loop) & 31; - angle_loop += angle; - - idx2 = (angle_loop) >> 5; - fact_val2 = (angle_loop) & 31; - angle_loop += angle; - - idx3 = (angle_loop) >> 5; - fact_val3 = (angle_loop) & 31; - angle_loop += angle; - - LD_UB2(ref + idx0 + 1, 16, top0, top1); - LD_UB2(ref + idx1 + 1, 16, top2, top3); - LD_UB2(ref + idx2 + 1, 16, top4, top5); - LD_UB2(ref + idx3 + 1, 16, top6, top7); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - fact4 = __msa_fill_h(fact_val2); - fact5 = __msa_fill_h(32 - fact_val2); - fact6 = __msa_fill_h(fact_val3); - fact7 = __msa_fill_h(32 - fact_val3); - - SLDI_B4_UB(top1, top0, top3, top2, top5, top4, top7, top6, 1, - top1, top3, top5, top7); - UNPCK_UB_SH(top0, diff0, diff1); - UNPCK_UB_SH(top1, diff2, diff3); - UNPCK_UB_SH(top2, diff4, diff5); - UNPCK_UB_SH(top3, diff6, diff7); - UNPCK_UB_SH(top4, diff8, diff9); - UNPCK_UB_SH(top5, diff10, diff11); - UNPCK_UB_SH(top6, diff12, diff13); - UNPCK_UB_SH(top7, diff14, diff15); - - MUL4(diff2, fact0, diff3, fact0, diff6, fact2, diff7, fact2, - diff2, diff3, diff6, diff7); - MUL4(diff10, fact4, diff11, fact4, diff14, fact6, diff15, fact6, - diff10, diff11, diff14, diff15); - - diff2 += diff0 * fact1; - diff3 += diff1 * fact1; - diff6 += diff4 * fact3; - diff7 += diff5 * fact3; - diff10 += diff8 * fact5; - diff11 += diff9 * fact5; - diff14 += diff12 * fact7; - diff15 += diff13 * fact7; - - SRARI_H4_SH(diff2, diff3, diff6, diff7, 5); - SRARI_H4_SH(diff10, diff11, diff14, diff15, 5); - PCKEV_B4_SB(diff3, diff2, diff7, diff6, diff11, diff10, diff15, diff14, - dst0, dst1, dst2, dst3); - ST_SB4(dst0, dst1, dst2, dst3, dst, stride); - dst += (4 * stride); - } -} - -static void hevc_intra_pred_angular_upper_32width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -256, -315, -390, -482, -630, -910, -1638, -4096 }; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp; - const uint8_t *ref; - const uint8_t *src_left_tmp = src_left - 1; - int32_t h_cnt, v_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t tmp0, tmp1, tmp2, tmp3; - int32_t angle, angle_loop; - int32_t inv_angle_val, inv_angle_val_loop; - int32_t last, offset; - v16u8 top0, top1, top2, top3, top4, top5, top6, top7; - v16i8 dst0, dst1, dst2, dst3; - v8i16 fact0, fact1, fact2, fact3; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 diff8, diff9, diff10, diff11, diff12, diff13, diff14, diff15; - - ref_tmp = ref_array + 32; - - angle = intra_pred_angle_up[mode - 18]; - inv_angle_val = inv_angle[mode - 18]; - last = angle; - angle_loop = angle; - - ref = src_top - 1; - if (last < -1) { - inv_angle_val_loop = inv_angle_val * last; - LD_UB2(ref, 16, top0, top1); - tmp0 = ref[32]; - tmp1 = ref[33]; - tmp2 = ref[34]; - tmp3 = ref[35]; - - ST_UB2(top0, top1, ref_tmp, 16); - ref_tmp[32] = tmp0; - ref_tmp[33] = tmp1; - ref_tmp[34] = tmp2; - ref_tmp[35] = tmp3; - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = (inv_angle_val_loop + 128) >> 8; - ref_tmp[h_cnt] = src_left_tmp[offset]; - inv_angle_val_loop += inv_angle_val; - } - - ref = ref_tmp; - } - - for (v_cnt = 16; v_cnt--;) { - idx0 = (angle_loop) >> 5; - fact_val0 = (angle_loop) & 31; - angle_loop += angle; - - idx1 = (angle_loop) >> 5; - fact_val1 = (angle_loop) & 31; - 
angle_loop += angle; - - top0 = LD_UB(ref + idx0 + 1); - top4 = LD_UB(ref + idx1 + 1); - top1 = LD_UB(ref + idx0 + 17); - top5 = LD_UB(ref + idx1 + 17); - top3 = LD_UB(ref + idx0 + 33); - top7 = LD_UB(ref + idx1 + 33); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - - top2 = top1; - top6 = top5; - - SLDI_B4_UB(top1, top0, top3, top2, top5, top4, top7, top6, 1, - top1, top3, top5, top7); - UNPCK_UB_SH(top0, diff0, diff1); - UNPCK_UB_SH(top1, diff2, diff3); - UNPCK_UB_SH(top2, diff4, diff5); - UNPCK_UB_SH(top3, diff6, diff7); - UNPCK_UB_SH(top4, diff8, diff9); - UNPCK_UB_SH(top5, diff10, diff11); - UNPCK_UB_SH(top6, diff12, diff13); - UNPCK_UB_SH(top7, diff14, diff15); - - MUL4(diff2, fact0, diff3, fact0, diff6, fact0, diff7, fact0, - diff2, diff3, diff6, diff7); - MUL4(diff10, fact2, diff11, fact2, diff14, fact2, diff15, fact2, - diff10, diff11, diff14, diff15); - - diff2 += diff0 * fact1; - diff3 += diff1 * fact1; - diff6 += diff4 * fact1; - diff7 += diff5 * fact1; - diff10 += diff8 * fact3; - diff11 += diff9 * fact3; - diff14 += diff12 * fact3; - diff15 += diff13 * fact3; - - SRARI_H4_SH(diff2, diff3, diff6, diff7, 5); - SRARI_H4_SH(diff10, diff11, diff14, diff15, 5); - PCKEV_B4_SB(diff3, diff2, diff7, diff6, diff11, diff10, diff15, diff14, - dst0, dst1, dst2, dst3); - - ST_SB2(dst0, dst1, dst, 16); - dst += stride; - ST_SB2(dst2, dst3, dst, 16); - dst += stride; - } -} - -static void hevc_intra_pred_angular_lower_4width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -4096, -1638, -910, -630, -482, -390, -315 }; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 4; - const uint8_t *ref; - int32_t last, offset; - int32_t h_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t idx2, fact_val2, idx3, fact_val3; - int32_t angle, angle_loop, inv_angle_val; - uint64_t tmp0; - v16i8 dst_val0, dst_val1; - v16u8 top0, top1, top2, top3; - v16u8 zero = { 0 }; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 fact0, fact1, fact2, fact3, fact4, fact5, fact6, fact7; - - angle = intra_pred_angle_low[mode - 2]; - last = angle >> 3; - angle_loop = angle; - - ref = src_left - 1; - if (last < -1) { - inv_angle_val = inv_angle[mode - 11]; - - tmp0 = LD(ref); - SD(tmp0, ref_tmp); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = -1 + ((h_cnt * inv_angle_val + 128) >> 8); - ref_tmp[h_cnt] = src_top[offset]; - } - - ref = ref_tmp; - } - - idx0 = angle_loop >> 5; - fact_val0 = angle_loop & 31; - angle_loop += angle; - - idx1 = angle_loop >> 5; - fact_val1 = angle_loop & 31; - angle_loop += angle; - - idx2 = angle_loop >> 5; - fact_val2 = angle_loop & 31; - angle_loop += angle; - - idx3 = angle_loop >> 5; - fact_val3 = angle_loop & 31; - - top0 = LD_UB(ref + idx0 + 1); - top1 = LD_UB(ref + idx1 + 1); - top2 = LD_UB(ref + idx2 + 1); - top3 = LD_UB(ref + idx3 + 1); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - fact4 = __msa_fill_h(fact_val2); - fact5 = __msa_fill_h(32 - fact_val2); - fact6 = __msa_fill_h(fact_val3); - fact7 = __msa_fill_h(32 - fact_val3); - - ILVR_D2_SH(fact2, fact0, fact6, fact4, fact0, fact2); - ILVR_D2_SH(fact3, fact1, fact7, fact5, fact1, fact3); - ILVR_B4_SH(zero, top0, zero, top1, zero, top2, zero, top3, - diff0, diff2, diff4, diff6); - SLDI_B4_SH(zero, 
diff0, zero, diff2, zero, diff4, zero, diff6, 2, - diff1, diff3, diff5, diff7); - ILVR_D2_SH(diff2, diff0, diff6, diff4, diff0, diff2); - ILVR_D2_SH(diff3, diff1, diff7, diff5, diff1, diff3); - MUL2(diff1, fact0, diff3, fact2, diff1, diff3); - - diff1 += diff0 * fact1; - diff3 += diff2 * fact3; - - SRARI_H2_SH(diff1, diff3, 5); - PCKEV_B2_SB(diff1, diff1, diff3, diff3, dst_val0, dst_val1); - - diff0 = (v8i16) __msa_pckev_b(dst_val1, dst_val0); - diff1 = (v8i16) __msa_pckod_b(dst_val1, dst_val0); - - diff2 = (v8i16) __msa_pckev_w((v4i32) diff1, (v4i32) diff0); - - dst_val0 = __msa_pckev_b((v16i8) diff2, (v16i8) diff2); - dst_val1 = __msa_pckod_b((v16i8) diff2, (v16i8) diff2); - - ST_W2(dst_val0, 0, 1, dst, stride); - ST_W2(dst_val1, 0, 1, dst + 2 * stride, stride); -} - -static void hevc_intra_pred_angular_lower_8width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -4096, -1638, -910, -630, -482, -390, -315 }; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 8; - const uint8_t *ref; - const uint8_t *src_top_tmp = src_top - 1; - uint8_t *dst_org; - int32_t last, offset, tmp0, tmp1, tmp2; - int32_t h_cnt, v_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t idx2, fact_val2, idx3, fact_val3; - int32_t angle, angle_loop, inv_angle_val; - v16i8 top0, top1, top2, top3; - v16i8 dst_val0, dst_val1, dst_val2, dst_val3; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 fact0, fact1, fact2, fact3, fact4, fact5, fact6, fact7; - - angle = intra_pred_angle_low[mode - 2]; - last = (angle) >> 2; - angle_loop = angle; - - ref = src_left - 1; - if (last < -1) { - inv_angle_val = inv_angle[mode - 11]; - - tmp0 = LW(ref); - tmp1 = LW(ref + 4); - tmp2 = LW(ref + 8); - SW(tmp0, ref_tmp); - SW(tmp1, ref_tmp + 4); - SW(tmp2, ref_tmp + 8); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = (h_cnt * inv_angle_val + 128) >> 8; - ref_tmp[h_cnt] = src_top_tmp[offset]; - } - - ref = ref_tmp; - } - - for (v_cnt = 0; v_cnt < 2; v_cnt++) { - dst_org = dst; - - idx0 = angle_loop >> 5; - fact_val0 = angle_loop & 31; - angle_loop += angle; - - idx1 = angle_loop >> 5; - fact_val1 = angle_loop & 31; - angle_loop += angle; - - idx2 = angle_loop >> 5; - fact_val2 = angle_loop & 31; - angle_loop += angle; - - idx3 = angle_loop >> 5; - fact_val3 = angle_loop & 31; - angle_loop += angle; - - top0 = LD_SB(ref + idx0 + 1); - top1 = LD_SB(ref + idx1 + 1); - top2 = LD_SB(ref + idx2 + 1); - top3 = LD_SB(ref + idx3 + 1); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - fact4 = __msa_fill_h(fact_val2); - fact5 = __msa_fill_h(32 - fact_val2); - fact6 = __msa_fill_h(fact_val3); - fact7 = __msa_fill_h(32 - fact_val3); - - UNPCK_UB_SH(top0, diff0, diff1); - UNPCK_UB_SH(top1, diff2, diff3); - UNPCK_UB_SH(top2, diff4, diff5); - UNPCK_UB_SH(top3, diff6, diff7); - SLDI_B4_SH(diff1, diff0, diff3, diff2, diff5, diff4, diff7, diff6, 2, - diff1, diff3, diff5, diff7); - MUL4(diff1, fact0, diff3, fact2, diff5, fact4, diff7, fact6, - diff1, diff3, diff5, diff7); - - diff1 += diff0 * fact1; - diff3 += diff2 * fact3; - diff5 += diff4 * fact5; - diff7 += diff6 * fact7; - - SRARI_H4_SH(diff1, diff3, diff5, diff7, 5); - PCKEV_B4_SB(diff1, diff1, diff3, diff3, diff5, diff5, diff7, diff7, - dst_val0, dst_val1, dst_val2, dst_val3); - ILVR_B2_SH(dst_val1, dst_val0, dst_val3, dst_val2, diff0, diff1); - ILVRL_H2_SH(diff1, diff0, 
diff3, diff4); - ST_W8(diff3, diff4, 0, 1, 2, 3, 0, 1, 2, 3, dst_org, stride); - dst += 4; - } -} - -static void hevc_intra_pred_angular_lower_16width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -4096, -1638, -910, -630, -482, -390, -315 }; - int32_t h_cnt, v_cnt, idx0, fact_val0, idx1, fact_val1; - int32_t idx2, fact_val2, idx3, fact_val3, tmp0; - v16i8 top0, top1, dst_val0, top2, top3, dst_val1; - v16i8 top4, top5, dst_val2, top6, top7, dst_val3; - v8i16 fact0, fact1, fact2, fact3, fact4, fact5, fact6, fact7; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 diff8, diff9, diff10, diff11, diff12, diff13, diff14, diff15; - int32_t angle, angle_loop, inv_angle_val, offset; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 16; - const uint8_t *ref, *src_top_tmp = src_top - 1; - uint8_t *dst_org; - int32_t last; - - angle = intra_pred_angle_low[mode - 2]; - last = (angle) >> 1; - angle_loop = angle; - - ref = src_left - 1; - if (last < -1) { - inv_angle_val = inv_angle[mode - 11]; - - top0 = LD_SB(ref); - tmp0 = LW(ref + 16); - ST_SB(top0, ref_tmp); - SW(tmp0, ref_tmp + 16); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = (h_cnt * inv_angle_val + 128) >> 8; - ref_tmp[h_cnt] = src_top_tmp[offset]; - } - - ref = ref_tmp; - } - - for (v_cnt = 0; v_cnt < 4; v_cnt++) { - dst_org = dst; - - idx0 = angle_loop >> 5; - fact_val0 = angle_loop & 31; - angle_loop += angle; - - idx1 = angle_loop >> 5; - fact_val1 = angle_loop & 31; - angle_loop += angle; - - idx2 = angle_loop >> 5; - fact_val2 = angle_loop & 31; - angle_loop += angle; - - idx3 = angle_loop >> 5; - fact_val3 = angle_loop & 31; - angle_loop += angle; - - LD_SB2(ref + idx0 + 1, 16, top0, top1); - LD_SB2(ref + idx1 + 1, 16, top2, top3); - LD_SB2(ref + idx2 + 1, 16, top4, top5); - LD_SB2(ref + idx3 + 1, 16, top6, top7); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - fact4 = __msa_fill_h(fact_val2); - fact5 = __msa_fill_h(32 - fact_val2); - fact6 = __msa_fill_h(fact_val3); - fact7 = __msa_fill_h(32 - fact_val3); - - SLDI_B4_SB(top1, top0, top3, top2, top5, top4, top7, top6, 1, - top1, top3, top5, top7); - - UNPCK_UB_SH(top0, diff0, diff1); - UNPCK_UB_SH(top1, diff2, diff3); - UNPCK_UB_SH(top2, diff4, diff5); - UNPCK_UB_SH(top3, diff6, diff7); - UNPCK_UB_SH(top4, diff8, diff9); - UNPCK_UB_SH(top5, diff10, diff11); - UNPCK_UB_SH(top6, diff12, diff13); - UNPCK_UB_SH(top7, diff14, diff15); - - MUL4(diff2, fact0, diff3, fact0, diff6, fact2, diff7, fact2, - diff2, diff3, diff6, diff7); - MUL4(diff10, fact4, diff11, fact4, diff14, fact6, diff15, fact6, - diff10, diff11, diff14, diff15); - - diff2 += diff0 * fact1; - diff3 += diff1 * fact1; - diff6 += diff4 * fact3; - diff7 += diff5 * fact3; - diff10 += diff8 * fact5; - diff11 += diff9 * fact5; - diff14 += diff12 * fact7; - diff15 += diff13 * fact7; - - SRARI_H4_SH(diff2, diff3, diff6, diff7, 5); - SRARI_H4_SH(diff10, diff11, diff14, diff15, 5); - PCKEV_B4_SB(diff3, diff2, diff7, diff6, diff11, diff10, diff15, diff14, - dst_val0, dst_val1, dst_val2, dst_val3); - ILVR_B2_SH(dst_val1, dst_val0, dst_val3, dst_val2, diff0, diff1); - ILVL_B2_SH(dst_val1, dst_val0, dst_val3, dst_val2, diff2, diff3); - ILVRL_H2_SH(diff1, diff0, diff4, diff5); - ILVRL_H2_SH(diff3, diff2, diff6, diff7); - ST_W8(diff4, diff5, 0, 1, 2, 3, 0, 1, 2, 3, dst_org, stride); - dst_org += (8 * 
stride); - ST_W8(diff6, diff7, 0, 1, 2, 3, 0, 1, 2, 3, dst_org, stride); - dst += 4; - } -} - -static void hevc_intra_pred_angular_lower_32width_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, - int32_t stride, - int32_t mode) -{ - int16_t inv_angle[] = { -4096, -1638, -910, -630, -482, -390, -315 }; - int32_t h_cnt, v_cnt, idx0, fact_val0, idx1, fact_val1, tmp0; - v16i8 top0, top1, dst_val0, top2, top3, dst_val1; - v16i8 top4, top5, dst_val2, top6, top7, dst_val3; - v8i16 fact0, fact1, fact2, fact3; - v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7; - v8i16 diff8, diff9, diff10, diff11, diff12, diff13, diff14, diff15; - int32_t angle, angle_loop, inv_angle_val, offset; - uint8_t ref_array[3 * 32 + 4]; - uint8_t *ref_tmp = ref_array + 32; - const uint8_t *ref, *src_top_tmp = src_top - 1; - uint8_t *dst_org; - int32_t last; - - angle = intra_pred_angle_low[mode - 2]; - last = angle; - angle_loop = angle; - - ref = src_left - 1; - if (last < -1) { - inv_angle_val = inv_angle[mode - 11]; - - LD_SB2(ref, 16, top0, top1); - tmp0 = LW(ref + 32); - ST_SB2(top0, top1, ref_tmp, 16); - SW(tmp0, ref_tmp + 32); - - for (h_cnt = last; h_cnt <= -1; h_cnt++) { - offset = (h_cnt * inv_angle_val + 128) >> 8; - ref_tmp[h_cnt] = src_top_tmp[offset]; - } - - ref = ref_tmp; - } - - for (v_cnt = 0; v_cnt < 16; v_cnt++) { - dst_org = dst; - idx0 = angle_loop >> 5; - fact_val0 = angle_loop & 31; - angle_loop += angle; - - idx1 = angle_loop >> 5; - fact_val1 = angle_loop & 31; - angle_loop += angle; - - top0 = LD_SB(ref + idx0 + 1); - top4 = LD_SB(ref + idx1 + 1); - top1 = LD_SB(ref + idx0 + 17); - top5 = LD_SB(ref + idx1 + 17); - top3 = LD_SB(ref + idx0 + 33); - top7 = LD_SB(ref + idx1 + 33); - - fact0 = __msa_fill_h(fact_val0); - fact1 = __msa_fill_h(32 - fact_val0); - fact2 = __msa_fill_h(fact_val1); - fact3 = __msa_fill_h(32 - fact_val1); - - top2 = top1; - top6 = top5; - - SLDI_B4_SB(top1, top0, top3, top2, top5, top4, top7, top6, 1, - top1, top3, top5, top7); - - UNPCK_UB_SH(top0, diff0, diff1); - UNPCK_UB_SH(top1, diff2, diff3); - UNPCK_UB_SH(top2, diff4, diff5); - UNPCK_UB_SH(top3, diff6, diff7); - UNPCK_UB_SH(top4, diff8, diff9); - UNPCK_UB_SH(top5, diff10, diff11); - UNPCK_UB_SH(top6, diff12, diff13); - UNPCK_UB_SH(top7, diff14, diff15); - - MUL4(diff2, fact0, diff3, fact0, diff6, fact0, diff7, fact0, - diff2, diff3, diff6, diff7); - MUL4(diff10, fact2, diff11, fact2, diff14, fact2, diff15, fact2, - diff10, diff11, diff14, diff15); - - diff2 += diff0 * fact1; - diff3 += diff1 * fact1; - diff6 += diff4 * fact1; - diff7 += diff5 * fact1; - diff10 += diff8 * fact3; - diff11 += diff9 * fact3; - diff14 += diff12 * fact3; - diff15 += diff13 * fact3; - - SRARI_H4_SH(diff2, diff3, diff6, diff7, 5); - SRARI_H4_SH(diff10, diff11, diff14, diff15, 5); - PCKEV_B4_SB(diff3, diff2, diff7, diff6, diff11, diff10, diff15, diff14, - dst_val0, dst_val1, dst_val2, dst_val3); - ILVRL_B2_SH(dst_val2, dst_val0, diff0, diff1); - ILVRL_B2_SH(dst_val3, dst_val1, diff2, diff3); - - ST_H8(diff0, 0, 1, 2, 3, 4, 5, 6, 7, dst_org, stride) - dst_org += (8 * stride); - ST_H8(diff1, 0, 1, 2, 3, 4, 5, 6, 7, dst_org, stride) - dst_org += (8 * stride); - ST_H8(diff2, 0, 1, 2, 3, 4, 5, 6, 7, dst_org, stride) - dst_org += (8 * stride); - ST_H8(diff3, 0, 1, 2, 3, 4, 5, 6, 7, dst_org, stride) - dst_org += (8 * stride); - - dst += 2; - } -} - -static void intra_predict_vert_32x32_msa(const uint8_t *src, uint8_t *dst, - int32_t dst_stride) -{ - uint32_t row; - v16u8 src1, src2; - - src1 = LD_UB(src); - src2 = 
LD_UB(src + 16); - - for (row = 32; row--;) { - ST_UB2(src1, src2, dst, 16); - dst += dst_stride; - } -} - -void ff_hevc_intra_pred_planar_0_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride) -{ - hevc_intra_pred_plane_4x4_msa(src_top, src_left, dst, stride); -} - -void ff_hevc_intra_pred_planar_1_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride) -{ - hevc_intra_pred_plane_8x8_msa(src_top, src_left, dst, stride); -} - -void ff_hevc_intra_pred_planar_2_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride) -{ - hevc_intra_pred_plane_16x16_msa(src_top, src_left, dst, stride); -} - -void ff_hevc_intra_pred_planar_3_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride) -{ - hevc_intra_pred_plane_32x32_msa(src_top, src_left, dst, stride); -} - -void ff_hevc_intra_pred_dc_msa(uint8_t *dst, const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride, int log2, int c_idx) -{ - switch (log2) { - case 2: - hevc_intra_pred_dc_4x4_msa(src_top, src_left, dst, stride, c_idx); - break; - - case 3: - hevc_intra_pred_dc_8x8_msa(src_top, src_left, dst, stride, c_idx); - break; - - case 4: - hevc_intra_pred_dc_16x16_msa(src_top, src_left, dst, stride, c_idx); - break; - - case 5: - hevc_intra_pred_dc_32x32_msa(src_top, src_left, dst, stride); - break; - } -} - -void ff_pred_intra_pred_angular_0_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride, int c_idx, int mode) -{ - if (mode == 10) { - hevc_intra_pred_horiz_4x4_msa(src_top, src_left, dst, stride, c_idx); - } else if (mode == 26) { - hevc_intra_pred_vert_4x4_msa(src_top, src_left, dst, stride, c_idx); - } else if (mode >= 18) { - hevc_intra_pred_angular_upper_4width_msa(src_top, src_left, - dst, stride, mode); - } else { - hevc_intra_pred_angular_lower_4width_msa(src_top, src_left, - dst, stride, mode); - } -} - -void ff_pred_intra_pred_angular_1_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride, int c_idx, int mode) -{ - if (mode == 10) { - hevc_intra_pred_horiz_8x8_msa(src_top, src_left, dst, stride, c_idx); - } else if (mode == 26) { - hevc_intra_pred_vert_8x8_msa(src_top, src_left, dst, stride, c_idx); - } else if (mode >= 18) { - hevc_intra_pred_angular_upper_8width_msa(src_top, src_left, - dst, stride, mode); - } else { - hevc_intra_pred_angular_lower_8width_msa(src_top, src_left, - dst, stride, mode); - } -} - -void ff_pred_intra_pred_angular_2_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride, int c_idx, int mode) -{ - if (mode == 10) { - hevc_intra_pred_horiz_16x16_msa(src_top, src_left, dst, stride, c_idx); - } else if (mode == 26) { - hevc_intra_pred_vert_16x16_msa(src_top, src_left, dst, stride, c_idx); - } else if (mode >= 18) { - hevc_intra_pred_angular_upper_16width_msa(src_top, src_left, - dst, stride, mode); - } else { - hevc_intra_pred_angular_lower_16width_msa(src_top, src_left, - dst, stride, mode); - } -} - -void ff_pred_intra_pred_angular_3_msa(uint8_t *dst, - const uint8_t *src_top, - const uint8_t *src_left, - ptrdiff_t stride, int c_idx, int mode) -{ - if (mode == 10) { - hevc_intra_pred_horiz_32x32_msa(src_top, src_left, dst, stride); - } else if (mode == 26) { - intra_predict_vert_32x32_msa(src_top, dst, stride); - } else if (mode >= 18) { - hevc_intra_pred_angular_upper_32width_msa(src_top, src_left, - dst, stride, mode); - } else { - 
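-        /* Remaining angular modes 2..17 project from the left reference
-         * array ("lower" path); modes 18..34 were handled above from the
-         * top reference, with mode 10 (pure horizontal) and mode 26 (pure
-         * vertical) special-cased to simple replication of the reference
-         * column/row. */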
hevc_intra_pred_angular_lower_32width_msa(src_top, src_left, - dst, stride, mode); - } -} - -void ff_intra_pred_8_16x16_msa(HEVCLocalContext *lc, int x0, int y0, int c_idx) -{ - v16u8 vec0; - const HEVCContext *const s = lc->parent; - int i; - int hshift = s->ps.sps->hshift[c_idx]; - int vshift = s->ps.sps->vshift[c_idx]; - int size_in_luma_h = 16 << hshift; - int size_in_tbs_h = size_in_luma_h >> s->ps.sps->log2_min_tb_size; - int size_in_luma_v = 16 << vshift; - int size_in_tbs_v = size_in_luma_v >> s->ps.sps->log2_min_tb_size; - int x = x0 >> hshift; - int y = y0 >> vshift; - int x_tb = (x0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask; - int y_tb = (y0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask; - - int cur_tb_addr = - s->ps.pps->min_tb_addr_zs[(y_tb) * (s->ps.sps->tb_mask + 2) + (x_tb)]; - - ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(uint8_t); - uint8_t *src = (uint8_t *) s->frame->data[c_idx] + x + y * stride; - - int min_pu_width = s->ps.sps->min_pu_width; - - enum IntraPredMode mode = c_idx ? lc->tu.intra_pred_mode_c : - lc->tu.intra_pred_mode; - uint32_t a; - uint8_t left_array[2 * 32 + 1]; - uint8_t filtered_left_array[2 * 32 + 1]; - uint8_t top_array[2 * 32 + 1]; - uint8_t filtered_top_array[2 * 32 + 1]; - - uint8_t *left = left_array + 1; - uint8_t *top = top_array + 1; - uint8_t *filtered_left = filtered_left_array + 1; - uint8_t *filtered_top = filtered_top_array + 1; - int cand_bottom_left = lc->na.cand_bottom_left - && cur_tb_addr > - s->ps.pps->min_tb_addr_zs[((y_tb + size_in_tbs_v) & s->ps.sps->tb_mask) * - (s->ps.sps->tb_mask + 2) + (x_tb - 1)]; - int cand_left = lc->na.cand_left; - int cand_up_left = lc->na.cand_up_left; - int cand_up = lc->na.cand_up; - int cand_up_right = lc->na.cand_up_right - && cur_tb_addr > - s->ps.pps->min_tb_addr_zs[(y_tb - 1) * (s->ps.sps->tb_mask + 2) + - ((x_tb + size_in_tbs_h) & s->ps.sps->tb_mask)]; - - int bottom_left_size = - (((y0 + 2 * size_in_luma_v) > - (s->ps.sps->height) ? (s->ps.sps->height) : (y0 + - 2 * size_in_luma_v)) - - (y0 + size_in_luma_v)) >> vshift; - int top_right_size = - (((x0 + 2 * size_in_luma_h) > - (s->ps.sps->width) ? (s->ps.sps->width) : (x0 + 2 * size_in_luma_h)) - - (x0 + size_in_luma_h)) >> hshift; - - if (s->ps.pps->constrained_intra_pred_flag == 1) { - int size_in_luma_pu_v = ((size_in_luma_v) >> s->ps.sps->log2_min_pu_size); - int size_in_luma_pu_h = ((size_in_luma_h) >> s->ps.sps->log2_min_pu_size); - int on_pu_edge_x = !(x0 & ((1 << s->ps.sps->log2_min_pu_size) - 1)); - int on_pu_edge_y = !(y0 & ((1 << s->ps.sps->log2_min_pu_size) - 1)); - if (!size_in_luma_pu_h) - size_in_luma_pu_h++; - if (cand_bottom_left == 1 && on_pu_edge_x) { - int x_left_pu = ((x0 - 1) >> s->ps.sps->log2_min_pu_size); - int y_bottom_pu = - ((y0 + size_in_luma_v) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_v) > - (s->ps.sps->min_pu_height - - y_bottom_pu) ? (s->ps.sps->min_pu_height - - y_bottom_pu) : (size_in_luma_pu_v)); - cand_bottom_left = 0; - for (i = 0; i < max; i += 2) - cand_bottom_left |= - ((s->ref->tab_mvf[(x_left_pu) + - (y_bottom_pu + - i) * min_pu_width]).pred_flag == - PF_INTRA); - } - if (cand_left == 1 && on_pu_edge_x) { - int x_left_pu = ((x0 - 1) >> s->ps.sps->log2_min_pu_size); - int y_left_pu = ((y0) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_v) > - (s->ps.sps->min_pu_height - - y_left_pu) ? 
(s->ps.sps->min_pu_height - - y_left_pu) : (size_in_luma_pu_v)); - cand_left = 0; - for (i = 0; i < max; i += 2) - cand_left |= - ((s->ref->tab_mvf[(x_left_pu) + - (y_left_pu + - i) * min_pu_width]).pred_flag == - PF_INTRA); - } - if (cand_up_left == 1) { - int x_left_pu = ((x0 - 1) >> s->ps.sps->log2_min_pu_size); - int y_top_pu = ((y0 - 1) >> s->ps.sps->log2_min_pu_size); - cand_up_left = - (s->ref->tab_mvf[(x_left_pu) + - (y_top_pu) * min_pu_width]).pred_flag == - PF_INTRA; - } - if (cand_up == 1 && on_pu_edge_y) { - int x_top_pu = ((x0) >> s->ps.sps->log2_min_pu_size); - int y_top_pu = ((y0 - 1) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_h) > - (s->ps.sps->min_pu_width - - x_top_pu) ? (s->ps.sps->min_pu_width - - x_top_pu) : (size_in_luma_pu_h)); - cand_up = 0; - for (i = 0; i < max; i += 2) - cand_up |= - ((s->ref->tab_mvf[(x_top_pu + i) + - (y_top_pu) * - min_pu_width]).pred_flag == PF_INTRA); - } - if (cand_up_right == 1 && on_pu_edge_y) { - int y_top_pu = ((y0 - 1) >> s->ps.sps->log2_min_pu_size); - int x_right_pu = - ((x0 + size_in_luma_h) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_h) > - (s->ps.sps->min_pu_width - - x_right_pu) ? (s->ps.sps->min_pu_width - - x_right_pu) : (size_in_luma_pu_h)); - cand_up_right = 0; - for (i = 0; i < max; i += 2) - cand_up_right |= - ((s->ref->tab_mvf[(x_right_pu + i) + - (y_top_pu) * - min_pu_width]).pred_flag == PF_INTRA); - } - - vec0 = (v16u8) __msa_ldi_b(128); - - ST_UB4(vec0, vec0, vec0, vec0, left, 16); - - ST_UB4(vec0, vec0, vec0, vec0, top, 16); - - top[-1] = 128; - } - if (cand_up_left) { - left[-1] = src[(-1) + stride * (-1)]; - top[-1] = left[-1]; - } - if (cand_up) { - vec0 = LD_UB(src - stride); - ST_UB(vec0, top); - } - if (cand_up_right) { - vec0 = LD_UB(src - stride + 16); - ST_UB(vec0, (top + 16)); - - do { - uint32_t pix = - ((src[(16 + top_right_size - 1) + stride * (-1)]) * - 0x01010101U); - for (i = 0; i < (16 - top_right_size); i += 4) - ((((union unaligned_32 *) (top + 16 + top_right_size + - i))->l) = (pix)); - } while (0); - } - if (cand_left) - for (i = 0; i < 16; i++) - left[i] = src[(-1) + stride * (i)]; - if (cand_bottom_left) { - for (i = 16; i < 16 + bottom_left_size; i++) - left[i] = src[(-1) + stride * (i)]; - do { - uint32_t pix = - ((src[(-1) + stride * (16 + bottom_left_size - 1)]) * - 0x01010101U); - for (i = 0; i < (16 - bottom_left_size); i += 4) - ((((union unaligned_32 *) (left + 16 + bottom_left_size + - i))->l) = (pix)); - } while (0); - } - - if (s->ps.pps->constrained_intra_pred_flag == 1) { - if (cand_bottom_left || cand_left || cand_up_left || cand_up - || cand_up_right) { - int size_max_x = - x0 + ((2 * 16) << hshift) < - s->ps.sps->width ? 2 * 16 : (s->ps.sps->width - x0) >> hshift; - int size_max_y = - y0 + ((2 * 16) << vshift) < - s->ps.sps->height ? 2 * 16 : (s->ps.sps->height - y0) >> vshift; - int j = 16 + (cand_bottom_left ? bottom_left_size : 0) - 1; - if (!cand_up_right) { - size_max_x = x0 + ((16) << hshift) < s->ps.sps->width ? - 16 : (s->ps.sps->width - x0) >> hshift; - } - if (!cand_bottom_left) { - size_max_y = y0 + ((16) << vshift) < s->ps.sps->height ? - 16 : (s->ps.sps->height - y0) >> vshift; - } - if (cand_bottom_left || cand_left || cand_up_left) { - while (j > -1 - && - !((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((j) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - j--; - if (! 
- ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((j) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == PF_INTRA)) { - j = 0; - while (j < size_max_x - && - !((s->ref->tab_mvf[(((x0 + - ((j) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((-1) << - vshift)) - >> s-> - ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - j++; - for (i = j; i > (j) - (j + 1); i--) - if (! - ((s->ref->tab_mvf[(((x0 + - ((i - - 1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((-1) << - vshift)) - >> s-> - ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - top[i - 1] = top[i]; - left[-1] = top[-1]; - } - } else { - j = 0; - while (j < size_max_x - && - !((s->ref->tab_mvf[(((x0 + - ((j) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((-1) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - j++; - if (j > 0) - if (x0 > 0) { - for (i = j; i > (j) - (j + 1); i--) - if (! - ((s->ref->tab_mvf[(((x0 + - ((i - - 1) << hshift)) >> - s->ps.sps->log2_min_pu_size)) - + (((y0 + ((-1) - << vshift)) - >> - s->ps.sps->log2_min_pu_size)) - * - min_pu_width]).pred_flag == - PF_INTRA)) - top[i - 1] = top[i]; - } else { - for (i = j; i > (j) - (j); i--) - if (! - ((s->ref->tab_mvf[(((x0 + - ((i - - 1) << hshift)) >> - s->ps.sps->log2_min_pu_size)) - + (((y0 + ((-1) - << vshift)) - >> - s->ps.sps->log2_min_pu_size)) - * - min_pu_width]).pred_flag == - PF_INTRA)) - top[i - 1] = top[i]; - top[-1] = top[0]; - } - left[-1] = top[-1]; - } - left[-1] = top[-1]; - if (cand_bottom_left || cand_left) { - a = ((left[-1]) * 0x01010101U); - for (i = 0; i < (0) + (size_max_y); i += 4) - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((i) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&left[i]))->l) = (a)); - else - a = ((left[i + 3]) * 0x01010101U); - } - if (!cand_left) { - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB(vec0, left); - } - if (!cand_bottom_left) { - - vec0 = (v16u8) __msa_fill_b(left[15]); - - ST_UB(vec0, (left + 16)); - } - if (x0 != 0 && y0 != 0) { - a = ((left[size_max_y - 1]) * 0x01010101U); - for (i = (size_max_y - 1); - i > (size_max_y - 1) - (size_max_y); i -= 4) - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((i - - 3) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&left[i - 3]))->l) = (a)); - else - a = ((left[i - 3]) * 0x01010101U); - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((-1) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == PF_INTRA)) - left[-1] = left[0]; - } else if (x0 == 0) { - do { - uint32_t pix = ((0) * 0x01010101U); - for (i = 0; i < (size_max_y); i += 4) - ((((union unaligned_32 *) (left + i))->l) = (pix)); - } while (0); - } else { - a = ((left[size_max_y - 1]) * 0x01010101U); - for (i = (size_max_y - 1); - i > (size_max_y - 1) - (size_max_y); i -= 4) - if (! 
- ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((i - - 3) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&left[i - 3]))->l) = (a)); - else - a = ((left[i - 3]) * 0x01010101U); - } - top[-1] = left[-1]; - if (y0 != 0) { - a = ((left[-1]) * 0x01010101U); - for (i = 0; i < (0) + (size_max_x); i += 4) - if (! - ((s->ref->tab_mvf[(((x0 + - ((i) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((-1) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&top[i]))->l) = (a)); - else - a = ((top[i + 3]) * 0x01010101U); - } - } - } - - if (!cand_bottom_left) { - if (cand_left) { - vec0 = (v16u8) __msa_fill_b(left[15]); - - ST_UB(vec0, (left + 16)); - - } else if (cand_up_left) { - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB2(vec0, vec0, left, 16); - - cand_left = 1; - } else if (cand_up) { - left[-1] = top[0]; - - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB2(vec0, vec0, left, 16); - - cand_up_left = 1; - cand_left = 1; - } else if (cand_up_right) { - vec0 = (v16u8) __msa_fill_b(top[16]); - - ST_UB(vec0, top); - - left[-1] = top[16]; - - ST_UB2(vec0, vec0, left, 16); - - cand_up = 1; - cand_up_left = 1; - cand_left = 1; - } else { - left[-1] = 128; - vec0 = (v16u8) __msa_ldi_b(128); - - ST_UB2(vec0, vec0, top, 16); - ST_UB2(vec0, vec0, left, 16); - } - } - - if (!cand_left) { - vec0 = (v16u8) __msa_fill_b(left[16]); - ST_UB(vec0, left); - } - if (!cand_up_left) { - left[-1] = left[0]; - } - if (!cand_up) { - vec0 = (v16u8) __msa_fill_b(left[-1]); - ST_UB(vec0, top); - } - if (!cand_up_right) { - vec0 = (v16u8) __msa_fill_b(top[15]); - ST_UB(vec0, (top + 16)); - } - - top[-1] = left[-1]; - - - if (!s->ps.sps->intra_smoothing_disabled_flag - && (c_idx == 0 || s->ps.sps->chroma_format_idc == 3)) { - if (mode != INTRA_DC && 16 != 4) { - int intra_hor_ver_dist_thresh[] = { 7, 1, 0 }; - int min_dist_vert_hor = - (((((int) (mode - 26U)) >= - 0 ? ((int) (mode - 26U)) : (-((int) (mode - 26U))))) > - ((((int) (mode - 10U)) >= - 0 ? ((int) (mode - 10U)) : (-((int) (mode - 10U))))) - ? ((((int) (mode - 10U)) >= - 0 ? ((int) (mode - 10U)) : (-((int) (mode - 10U))))) - : ((((int) (mode - 26U)) >= - 0 ? 
((int) (mode - 26U)) : (-((int) (mode - 26U)))))); - if (min_dist_vert_hor > intra_hor_ver_dist_thresh[4 - 3]) { - filtered_left[2 * 16 - 1] = left[2 * 16 - 1]; - filtered_top[2 * 16 - 1] = top[2 * 16 - 1]; - for (i = 2 * 16 - 2; i >= 0; i--) - filtered_left[i] = (left[i + 1] + 2 * left[i] + - left[i - 1] + 2) >> 2; - filtered_top[-1] = - filtered_left[-1] = - (left[0] + 2 * left[-1] + top[0] + 2) >> 2; - for (i = 2 * 16 - 2; i >= 0; i--) - filtered_top[i] = (top[i + 1] + 2 * top[i] + - top[i - 1] + 2) >> 2; - left = filtered_left; - top = filtered_top; - } - } - } - - switch (mode) { - case INTRA_PLANAR: - s->hpc.pred_planar[4 - 2] ((uint8_t *) src, (uint8_t *) top, - (uint8_t *) left, stride); - break; - case INTRA_DC: - s->hpc.pred_dc((uint8_t *) src, (uint8_t *) top, - (uint8_t *) left, stride, 4, c_idx); - break; - default: - s->hpc.pred_angular[4 - 2] ((uint8_t *) src, (uint8_t *) top, - (uint8_t *) left, stride, c_idx, mode); - break; - } -} - -void ff_intra_pred_8_32x32_msa(HEVCLocalContext *lc, int x0, int y0, int c_idx) -{ - v16u8 vec0, vec1; - v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - v8i16 res0, res1, res2, res3; - v8i16 mul_val0 = { 63, 62, 61, 60, 59, 58, 57, 56 }; - v8i16 mul_val1 = { 1, 2, 3, 4, 5, 6, 7, 8 }; - const HEVCContext *const s = lc->parent; - int i; - int hshift = s->ps.sps->hshift[c_idx]; - int vshift = s->ps.sps->vshift[c_idx]; - int size_in_luma_h = 32 << hshift; - int size_in_tbs_h = size_in_luma_h >> s->ps.sps->log2_min_tb_size; - int size_in_luma_v = 32 << vshift; - int size_in_tbs_v = size_in_luma_v >> s->ps.sps->log2_min_tb_size; - int x = x0 >> hshift; - int y = y0 >> vshift; - int x_tb = (x0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask; - int y_tb = (y0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask; - - int cur_tb_addr = - s->ps.pps->min_tb_addr_zs[(y_tb) * (s->ps.sps->tb_mask + 2) + (x_tb)]; - - ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(uint8_t); - uint8_t *src = (uint8_t *) s->frame->data[c_idx] + x + y * stride; - - int min_pu_width = s->ps.sps->min_pu_width; - - enum IntraPredMode mode = c_idx ? lc->tu.intra_pred_mode_c : - lc->tu.intra_pred_mode; - uint32_t a; - uint8_t left_array[2 * 32 + 1]; - uint8_t filtered_left_array[2 * 32 + 1]; - uint8_t top_array[2 * 32 + 1]; - uint8_t filtered_top_array[2 * 32 + 1]; - - uint8_t *left = left_array + 1; - uint8_t *top = top_array + 1; - uint8_t *filtered_left = filtered_left_array + 1; - uint8_t *filtered_top = filtered_top_array + 1; - int cand_bottom_left = lc->na.cand_bottom_left - && cur_tb_addr > - s->ps.pps->min_tb_addr_zs[((y_tb + size_in_tbs_v) & s->ps.sps->tb_mask) * - (s->ps.sps->tb_mask + 2) + (x_tb - 1)]; - int cand_left = lc->na.cand_left; - int cand_up_left = lc->na.cand_up_left; - int cand_up = lc->na.cand_up; - int cand_up_right = lc->na.cand_up_right - && cur_tb_addr > - s->ps.pps->min_tb_addr_zs[(y_tb - 1) * (s->ps.sps->tb_mask + 2) + - ((x_tb + size_in_tbs_h) & s->ps.sps->tb_mask)]; - - int bottom_left_size = - (((y0 + 2 * size_in_luma_v) > - (s->ps.sps->height) ? (s->ps.sps->height) : (y0 + - 2 * size_in_luma_v)) - - (y0 + size_in_luma_v)) >> vshift; - int top_right_size = - (((x0 + 2 * size_in_luma_h) > - (s->ps.sps->width) ? 
(s->ps.sps->width) : (x0 + 2 * size_in_luma_h)) - - (x0 + size_in_luma_h)) >> hshift; - - if (s->ps.pps->constrained_intra_pred_flag == 1) { - int size_in_luma_pu_v = ((size_in_luma_v) >> s->ps.sps->log2_min_pu_size); - int size_in_luma_pu_h = ((size_in_luma_h) >> s->ps.sps->log2_min_pu_size); - int on_pu_edge_x = !(x0 & ((1 << s->ps.sps->log2_min_pu_size) - 1)); - int on_pu_edge_y = !(y0 & ((1 << s->ps.sps->log2_min_pu_size) - 1)); - if (!size_in_luma_pu_h) - size_in_luma_pu_h++; - if (cand_bottom_left == 1 && on_pu_edge_x) { - int x_left_pu = ((x0 - 1) >> s->ps.sps->log2_min_pu_size); - int y_bottom_pu = - ((y0 + size_in_luma_v) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_v) > - (s->ps.sps->min_pu_height - - y_bottom_pu) ? (s->ps.sps->min_pu_height - - y_bottom_pu) : (size_in_luma_pu_v)); - cand_bottom_left = 0; - for (i = 0; i < max; i += 2) - cand_bottom_left |= - ((s->ref->tab_mvf[(x_left_pu) + - (y_bottom_pu + - i) * min_pu_width]).pred_flag == - PF_INTRA); - } - if (cand_left == 1 && on_pu_edge_x) { - int x_left_pu = ((x0 - 1) >> s->ps.sps->log2_min_pu_size); - int y_left_pu = ((y0) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_v) > - (s->ps.sps->min_pu_height - - y_left_pu) ? (s->ps.sps->min_pu_height - - y_left_pu) : (size_in_luma_pu_v)); - cand_left = 0; - for (i = 0; i < max; i += 2) - cand_left |= - ((s->ref->tab_mvf[(x_left_pu) + - (y_left_pu + - i) * min_pu_width]).pred_flag == - PF_INTRA); - } - if (cand_up_left == 1) { - int x_left_pu = ((x0 - 1) >> s->ps.sps->log2_min_pu_size); - int y_top_pu = ((y0 - 1) >> s->ps.sps->log2_min_pu_size); - cand_up_left = - (s->ref->tab_mvf[(x_left_pu) + - (y_top_pu) * min_pu_width]).pred_flag == - PF_INTRA; - } - if (cand_up == 1 && on_pu_edge_y) { - int x_top_pu = ((x0) >> s->ps.sps->log2_min_pu_size); - int y_top_pu = ((y0 - 1) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_h) > - (s->ps.sps->min_pu_width - - x_top_pu) ? (s->ps.sps->min_pu_width - - x_top_pu) : (size_in_luma_pu_h)); - cand_up = 0; - for (i = 0; i < max; i += 2) - cand_up |= - ((s->ref->tab_mvf[(x_top_pu + i) + - (y_top_pu) * - min_pu_width]).pred_flag == PF_INTRA); - } - if (cand_up_right == 1 && on_pu_edge_y) { - int y_top_pu = ((y0 - 1) >> s->ps.sps->log2_min_pu_size); - int x_right_pu = - ((x0 + size_in_luma_h) >> s->ps.sps->log2_min_pu_size); - int max = - ((size_in_luma_pu_h) > - (s->ps.sps->min_pu_width - - x_right_pu) ? 
(s->ps.sps->min_pu_width - - x_right_pu) : (size_in_luma_pu_h)); - cand_up_right = 0; - for (i = 0; i < max; i += 2) - cand_up_right |= - ((s->ref->tab_mvf[(x_right_pu + i) + - (y_top_pu) * - min_pu_width]).pred_flag == PF_INTRA); - } - vec0 = (v16u8) __msa_ldi_b(128); - - ST_UB4(vec0, vec0, vec0, vec0, left, 16); - ST_UB4(vec0, vec0, vec0, vec0, top, 16); - - top[-1] = 128; - } - if (cand_up_left) { - left[-1] = src[(-1) + stride * (-1)]; - top[-1] = left[-1]; - } - if (cand_up) { - LD_UB2(src - stride, 16, vec0, vec1); - ST_UB2(vec0, vec1, top, 16); - } - - if (cand_up_right) { - LD_UB2(src - stride + 32, 16, vec0, vec1); - ST_UB2(vec0, vec1, (top + 32), 16); - do { - uint32_t pix = - ((src[(32 + top_right_size - 1) + stride * (-1)]) * - 0x01010101U); - for (i = 0; i < (32 - top_right_size); i += 4) - ((((union unaligned_32 *) (top + 32 + top_right_size + - i))->l) = (pix)); - } while (0); - } - if (cand_left) - for (i = 0; i < 32; i++) - left[i] = src[(-1) + stride * (i)]; - if (cand_bottom_left) { - for (i = 32; i < 32 + bottom_left_size; i++) - left[i] = src[(-1) + stride * (i)]; - do { - uint32_t pix = - ((src[(-1) + stride * (32 + bottom_left_size - 1)]) * - 0x01010101U); - for (i = 0; i < (32 - bottom_left_size); i += 4) - ((((union unaligned_32 *) (left + 32 + bottom_left_size + - i))->l) = (pix)); - } while (0); - } - - if (s->ps.pps->constrained_intra_pred_flag == 1) { - if (cand_bottom_left || cand_left || cand_up_left || cand_up - || cand_up_right) { - int size_max_x = - x0 + ((2 * 32) << hshift) < - s->ps.sps->width ? 2 * 32 : (s->ps.sps->width - x0) >> hshift; - int size_max_y = - y0 + ((2 * 32) << vshift) < - s->ps.sps->height ? 2 * 32 : (s->ps.sps->height - y0) >> vshift; - int j = 32 + (cand_bottom_left ? bottom_left_size : 0) - 1; - if (!cand_up_right) { - size_max_x = x0 + ((32) << hshift) < s->ps.sps->width ? - 32 : (s->ps.sps->width - x0) >> hshift; - } - if (!cand_bottom_left) { - size_max_y = y0 + ((32) << vshift) < s->ps.sps->height ? - 32 : (s->ps.sps->height - y0) >> vshift; - } - if (cand_bottom_left || cand_left || cand_up_left) { - while (j > -1 - && - !((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((j) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - j--; - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((j) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == PF_INTRA)) { - j = 0; - while (j < size_max_x - && - !((s->ref->tab_mvf[(((x0 + - ((j) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((-1) << - vshift)) - >> s-> - ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - j++; - for (i = j; i > (j) - (j + 1); i--) - if (! - ((s->ref->tab_mvf[(((x0 + - ((i - - 1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((-1) << - vshift)) - >> s-> - ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - top[i - 1] = top[i]; - left[-1] = top[-1]; - } - } else { - j = 0; - while (j < size_max_x - && - !((s->ref->tab_mvf[(((x0 + - ((j) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((-1) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - j++; - if (j > 0) - if (x0 > 0) { - for (i = j; i > (j) - (j + 1); i--) - if (! 
- ((s->ref->tab_mvf[(((x0 + - ((i - - 1) << hshift)) >> - s->ps.sps->log2_min_pu_size)) - + (((y0 + ((-1) - << vshift)) - >> - s->ps.sps->log2_min_pu_size)) - * - min_pu_width]).pred_flag == - PF_INTRA)) - top[i - 1] = top[i]; - } else { - for (i = j; i > (j) - (j); i--) - if (! - ((s->ref->tab_mvf[(((x0 + - ((i - - 1) << hshift)) >> - s->ps.sps->log2_min_pu_size)) - + (((y0 + ((-1) - << vshift)) - >> - s->ps.sps->log2_min_pu_size)) - * - min_pu_width]).pred_flag == - PF_INTRA)) - top[i - 1] = top[i]; - top[-1] = top[0]; - } - left[-1] = top[-1]; - } - left[-1] = top[-1]; - if (cand_bottom_left || cand_left) { - a = ((left[-1]) * 0x01010101U); - for (i = 0; i < (0) + (size_max_y); i += 4) - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((i) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&left[i]))->l) = (a)); - else - a = ((left[i + 3]) * 0x01010101U); - } - if (!cand_left) { - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB2(vec0, vec0, left, 16); - } - if (!cand_bottom_left) { - vec0 = (v16u8) __msa_fill_b(left[31]); - - ST_UB2(vec0, vec0, (left + 32), 16); - } - if (x0 != 0 && y0 != 0) { - a = ((left[size_max_y - 1]) * 0x01010101U); - for (i = (size_max_y - 1); - i > (size_max_y - 1) - (size_max_y); i -= 4) - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((i - - 3) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&left[i - 3]))->l) = (a)); - else - a = ((left[i - 3]) * 0x01010101U); - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((-1) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == PF_INTRA)) - left[-1] = left[0]; - } else if (x0 == 0) { - do { - uint32_t pix = ((0) * 0x01010101U); - for (i = 0; i < (size_max_y); i += 4) - ((((union unaligned_32 *) (left + i))->l) = (pix)); - } while (0); - } else { - a = ((left[size_max_y - 1]) * 0x01010101U); - for (i = (size_max_y - 1); - i > (size_max_y - 1) - (size_max_y); i -= 4) - if (! - ((s->ref->tab_mvf[(((x0 + - ((-1) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + - ((i - - 3) << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&left[i - 3]))->l) = (a)); - else - a = ((left[i - 3]) * 0x01010101U); - } - top[-1] = left[-1]; - if (y0 != 0) { - a = ((left[-1]) * 0x01010101U); - for (i = 0; i < (0) + (size_max_x); i += 4) - if (! 
- ((s->ref->tab_mvf[(((x0 + - ((i) << hshift)) >> s->ps.sps-> - log2_min_pu_size)) + (((y0 + ((-1) - << - vshift)) - >> s->ps.sps-> - log2_min_pu_size)) - * min_pu_width]).pred_flag == - PF_INTRA)) - ((((union unaligned_32 *) (&top[i]))->l) = (a)); - else - a = ((top[i + 3]) * 0x01010101U); - } - } - } - - if (!cand_bottom_left) { - if (cand_left) { - vec0 = (v16u8) __msa_fill_b(left[31]); - - ST_UB2(vec0, vec0, (left + 32), 16); - } else if (cand_up_left) { - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB4(vec0, vec0, vec0, vec0, left, 16); - - cand_left = 1; - } else if (cand_up) { - left[-1] = top[0]; - - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB4(vec0, vec0, vec0, vec0, left, 16); - - cand_up_left = 1; - cand_left = 1; - } else if (cand_up_right) { - vec0 = (v16u8) __msa_fill_b(top[32]); - - ST_UB2(vec0, vec0, top, 16); - - left[-1] = top[32]; - - ST_UB4(vec0, vec0, vec0, vec0, left, 16); - - cand_up = 1; - cand_up_left = 1; - cand_left = 1; - } else { - left[-1] = 128; - - vec0 = (v16u8) __msa_ldi_b(128); - - ST_UB4(vec0, vec0, vec0, vec0, top, 16); - ST_UB4(vec0, vec0, vec0, vec0, left, 16); - } - } - - if (!cand_left) { - vec0 = (v16u8) __msa_fill_b(left[32]); - - ST_UB2(vec0, vec0, left, 16); - } - if (!cand_up_left) { - left[-1] = left[0]; - } - if (!cand_up) { - vec0 = (v16u8) __msa_fill_b(left[-1]); - - ST_UB2(vec0, vec0, top, 16); - } - if (!cand_up_right) { - vec0 = (v16u8) __msa_fill_b(top[31]); - - ST_UB2(vec0, vec0, (top + 32), 16); - } - - top[-1] = left[-1]; - - - if (!s->ps.sps->intra_smoothing_disabled_flag - && (c_idx == 0 || s->ps.sps->chroma_format_idc == 3)) { - if (mode != INTRA_DC && 32 != 4) { - int intra_hor_ver_dist_thresh[] = { 7, 1, 0 }; - int min_dist_vert_hor = - (((((int) (mode - 26U)) >= - 0 ? ((int) (mode - 26U)) : (-((int) (mode - 26U))))) > - ((((int) (mode - 10U)) >= - 0 ? ((int) (mode - 10U)) : (-((int) (mode - 10U))))) - ? ((((int) (mode - 10U)) >= - 0 ? ((int) (mode - 10U)) : (-((int) (mode - 10U))))) - : ((((int) (mode - 26U)) >= - 0 ? ((int) (mode - 26U)) : (-((int) (mode - 26U)))))); - if (min_dist_vert_hor > intra_hor_ver_dist_thresh[5 - 3]) { - int threshold = 1 << (8 - 5); - if (s->ps.sps->sps_strong_intra_smoothing_enable_flag - && c_idx == 0 - && ((top[-1] + top[63] - 2 * top[31]) >= - 0 ? (top[-1] + top[63] - - 2 * top[31]) : (-(top[-1] + top[63] - - 2 * top[31]))) < threshold - && ((left[-1] + left[63] - 2 * left[31]) >= - 0 ? 
(left[-1] + left[63] - - 2 * left[31]) : (-(left[-1] + left[63] - - 2 * left[31]))) < threshold) { - - - filtered_top[-1] = top[-1]; - filtered_top[63] = top[63]; - - - for (i = 0; i < 63; i++) { - filtered_top[i] = - ((63 - i) * top[-1] + (i + 1) * top[63] + 32) >> 6; - } - - tmp0 = __msa_fill_h(top[-1]); - tmp1 = __msa_fill_h(top[63]); - - tmp2 = mul_val0 - 8; - tmp3 = mul_val0 - 16; - tmp4 = mul_val0 - 24; - tmp5 = mul_val1 + 8; - tmp6 = mul_val1 + 16; - tmp7 = mul_val1 + 24; - - res0 = mul_val0 * tmp0; - res1 = tmp2 * tmp0; - res2 = tmp3 * tmp0; - res3 = tmp4 * tmp0; - res0 += mul_val1 * tmp1; - res1 += tmp5 * tmp1; - res2 += tmp6 * tmp1; - res3 += tmp7 * tmp1; - - res0 = __msa_srari_h(res0, 6); - res1 = __msa_srari_h(res1, 6); - res2 = __msa_srari_h(res2, 6); - res3 = __msa_srari_h(res3, 6); - - vec0 = (v16u8) __msa_pckev_b((v16i8) res1, (v16i8) res0); - vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2); - - ST_UB2(vec0, vec1, filtered_top, 16); - - res0 = mul_val0 - 32; - tmp2 = mul_val0 - 40; - tmp3 = mul_val0 - 48; - tmp4 = mul_val0 - 56; - res3 = mul_val1 + 32; - tmp5 = mul_val1 + 40; - tmp6 = mul_val1 + 48; - tmp7 = mul_val1 + 56; - - res0 = res0 * tmp0; - res1 = tmp2 * tmp0; - res2 = tmp3 * tmp0; - res0 += res3 * tmp1; - res3 = tmp4 * tmp0; - res1 += tmp5 * tmp1; - res2 += tmp6 * tmp1; - res3 += tmp7 * tmp1; - - res0 = __msa_srari_h(res0, 6); - res1 = __msa_srari_h(res1, 6); - res2 = __msa_srari_h(res2, 6); - res3 = __msa_srari_h(res3, 6); - - vec0 = (v16u8) __msa_pckev_b((v16i8) res1, (v16i8) res0); - vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2); - - ST_UB2(vec0, vec1, (filtered_top + 32), 16); - - filtered_top[63] = top[63]; - - tmp0 = __msa_fill_h(left[-1]); - tmp1 = __msa_fill_h(left[63]); - - tmp2 = mul_val0 - 8; - tmp3 = mul_val0 - 16; - tmp4 = mul_val0 - 24; - tmp5 = mul_val1 + 8; - tmp6 = mul_val1 + 16; - tmp7 = mul_val1 + 24; - - res0 = mul_val0 * tmp0; - res1 = tmp2 * tmp0; - res2 = tmp3 * tmp0; - res3 = tmp4 * tmp0; - res0 += mul_val1 * tmp1; - res1 += tmp5 * tmp1; - res2 += tmp6 * tmp1; - res3 += tmp7 * tmp1; - - res0 = __msa_srari_h(res0, 6); - res1 = __msa_srari_h(res1, 6); - res2 = __msa_srari_h(res2, 6); - res3 = __msa_srari_h(res3, 6); - - vec0 = (v16u8) __msa_pckev_b((v16i8) res1, (v16i8) res0); - vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2); - - ST_UB2(vec0, vec1, left, 16); - - res0 = mul_val0 - 32; - tmp2 = mul_val0 - 40; - tmp3 = mul_val0 - 48; - tmp4 = mul_val0 - 56; - res3 = mul_val1 + 32; - tmp5 = mul_val1 + 40; - tmp6 = mul_val1 + 48; - tmp7 = mul_val1 + 56; - - res0 = res0 * tmp0; - res1 = tmp2 * tmp0; - res2 = tmp3 * tmp0; - res0 += res3 * tmp1; - res3 = tmp4 * tmp0; - res1 += tmp5 * tmp1; - res2 += tmp6 * tmp1; - res3 += tmp7 * tmp1; - - res0 = __msa_srari_h(res0, 6); - res1 = __msa_srari_h(res1, 6); - res2 = __msa_srari_h(res2, 6); - res3 = __msa_srari_h(res3, 6); - - vec0 = (v16u8) __msa_pckev_b((v16i8) res1, (v16i8) res0); - vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2); - - ST_UB2(vec0, vec1, (left + 32), 16); - - left[63] = tmp1[0]; - - top = filtered_top; - } else { - filtered_left[2 * 32 - 1] = left[2 * 32 - 1]; - filtered_top[2 * 32 - 1] = top[2 * 32 - 1]; - for (i = 2 * 32 - 2; i >= 0; i--) - filtered_left[i] = (left[i + 1] + 2 * left[i] + - left[i - 1] + 2) >> 2; - filtered_top[-1] = - filtered_left[-1] = - (left[0] + 2 * left[-1] + top[0] + 2) >> 2; - for (i = 2 * 32 - 2; i >= 0; i--) - filtered_top[i] = (top[i + 1] + 2 * top[i] + - top[i - 1] + 2) >> 2; - left = filtered_left; - top = 
filtered_top; - } - } - } - } - - switch (mode) { - case INTRA_PLANAR: - s->hpc.pred_planar[3] ((uint8_t *) src, (uint8_t *) top, - (uint8_t *) left, stride); - break; - case INTRA_DC: - s->hpc.pred_dc((uint8_t *) src, (uint8_t *) top, - (uint8_t *) left, stride, 5, c_idx); - break; - default: - s->hpc.pred_angular[3] ((uint8_t *) src, (uint8_t *) top, - (uint8_t *) left, stride, c_idx, mode); - break; - } -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/300 Game 2 APK The Ultimate Action Game Based on the Movie.md b/spaces/congsaPfin/Manga-OCR/logs/300 Game 2 APK The Ultimate Action Game Based on the Movie.md deleted file mode 100644 index 7f417389b6ff6de45658b94bd9b6ebddd0fd8a70..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/300 Game 2 APK The Ultimate Action Game Based on the Movie.md +++ /dev/null @@ -1,128 +0,0 @@ -
    -

    300 Game 2 APK: A Review of the Epic Action Game

    -

    If you are a fan of action games, especially those based on historical and mythical events, then you might have heard of the 300 game series. These games are inspired by the movie franchise "300", which depicts the legendary battle of Thermopylae, where a small army of Greek warriors led by King Leonidas faced the massive Persian invasion led by King Xerxes.

    -

    In this article, we will review the latest installment of the series, 300 game 2 apk, which is also known as 300: Seize Your Glory. We will tell you what this game is about, why you should play it, what features it offers, how to download and install it, and some tips and tricks to play better. So, without further ado, let's get started!

    -

    300 game 2 apk


    DOWNLOAD ———>>> https://urlca.com/2uOckC



    -

    Introduction

    -

    What is 300 game 2 apk?

    -

    300 game 2 apk is an action game developed by Warner Bros. International Enterprises, based on the movie "300: Rise of an Empire". The game follows the story of Greek general Themistokles, who tries to unite all of Greece against the invading Persian forces led by Artemisia, the vengeful commander of the Persian navy.

    -

    The game takes you to a fresh battlefield - on the sea - where you have to fight your way through waves of enemy ships, soldiers, and mythical creatures. You can use your sword, shield, spear, and bow to attack and defend yourself. You can also upgrade your weapons and skills as you progress through the game.

    -

    Why should you play 300 game 2 apk?

    -

    There are many reasons why you should play 300 game 2 apk, but here are some of the main ones:

    -
      -
    • It is a free game that you can download and play on your Android device.
    • -
    • It is a fun and exciting game that will keep you entertained for hours.
    • -
    • It is a challenging game that will test your skills and reflexes.
    • -
    • It is a faithful adaptation of the movie, with authentic characters, scenes, and dialogues.
    • -
    • It is a visually stunning game, with realistic graphics and sound effects that will make you feel like you are in the middle of the action.
    • -
    -

    Features of 300 game 2 apk

    -

    Stunning graphics and sound effects

    -

        Among the most impressive features of 300 game 2 apk are its graphics and sound effects. The game uses high-quality 3D graphics that create a realistic and immersive environment. The game also uses dynamic lighting and shadows, water effects, particle effects, and animations that enhance the gameplay experience.
    

    -

    The sound effects are also amazing, as they match the action on the screen. You can hear the clash of swords, the roar of ships, the screams of enemies, and the voice of Themistokles as he commands his troops. The game also features an epic soundtrack that suits the mood and atmosphere of the game.

    -

    300: Seize Your Glory APK download
    -300: Rise of an Empire game for Android
    -300 game 2 apk free download
    -300 game 2 apk mod
    -300 game 2 apk offline
    -300 game 2 apk latest version
    -300 game 2 apk full version
    -300 game 2 apk unlimited money
    -300 game 2 apk obb
    -300 game 2 apk hack
    -How to install 300 game 2 apk
    -How to play 300 game 2 apk
    -Best tips and tricks for 300 game 2 apk
    -Reviews and ratings of 300 game 2 apk
    -Alternatives to 300 game 2 apk
    -Similar games to 300 game 2 apk
    -Download 300: Seize Your Glory APK for PC
    -Download 300: Seize Your Glory APK for iOS
    -Download 300: Seize Your Glory APK for Windows Phone
    -Download 300: Seize Your Glory APK for Firestick
    -Download 300: Seize Your Glory APK for Smart TV
    -Download 300: Seize Your Glory APK for Chromebook
    -Download 300: Seize Your Glory APK for Mac
    -Download 300: Seize Your Glory APK for Linux
    -Download 300: Seize Your Glory APK for Kindle Fire
    -Is 300 game 2 apk safe?
    -Is 300 game 2 apk legal?
    -Is 300 game 2 apk compatible with my device?
    -Is 300 game 2 apk updated?
    -Is 300 game 2 apk fun?
    -What is the size of 300 game 2 apk?
    -What is the genre of 300 game 2 apk?
    -What is the developer of 300 game 2 apk?
    -What is the publisher of 300 game 2 apk?
    -What is the release date of 300 game 2 apk?
    -What are the features of 300 game 2 apk?
    -What are the requirements of 300 game 2 apk?
    -What are the permissions of 300 game 2 apk?
    -What are the languages of 300 game 2 apk?
    -What are the graphics of 300 game 2 apk?
    -How to update 300 game 2 apk?
    -How to uninstall 300 game 2 apk?
    -How to backup and restore data in 300 game 2 apk?
    -How to fix errors and bugs in 300 game 2 apk?
    -How to contact support for help with issues in the app.

    -

    Intense and immersive gameplay

    -

        Another feature that makes 300 game 2 apk a great game is its gameplay. The game offers intense and immersive gameplay that will keep you on the edge of your seat. It has multiple levels and challenges that vary in difficulty and objectives, and you have to fight your way through different scenarios, such as naval battles, beach assaults, city sieges, and more.
    

    -

    The game also has different modes that add variety and replay value to the game. You can play in the story mode, where you follow the plot of the movie, or in the survival mode, where you have to survive as long as possible against endless waves of enemies. You can also play in the multiplayer mode, where you can team up with other players online and compete against other teams.

    -

    Multiple levels and challenges

    -

    300 game 2 apk has multiple levels and challenges that will keep you hooked and motivated. The game has 12 levels in the story mode, each with its own objectives and enemies. You have to complete each level within a time limit and with a minimum score to unlock the next one. You also have to collect coins and gems along the way, which you can use to upgrade your weapons and skills.

    -

    The game also has various challenges that you can complete to earn extra rewards and achievements. Some of the challenges include killing a certain number of enemies, using a specific weapon, performing a combo attack, and more. The game also has a leaderboard system that ranks you based on your score and performance.

    -

    Easy controls and customization

    -

    300 game 2 apk is easy to control and customize, making it suitable for players of all ages and preferences. The game has simple and intuitive controls that allow you to move, attack, defend, and switch weapons with just a few taps and swipes on your screen. You can also adjust the sensitivity and layout of the controls according to your liking.

    -

    The game also allows you to customize your character and weapons. You can choose from different outfits, helmets, shields, swords, spears, and bows that have different stats and effects. You can also upgrade your weapons and skills by spending coins and gems that you earn in the game. You can improve your damage, speed, health, stamina, and more.

    -

    How to download and install 300 game 2 apk

    -

    Requirements and compatibility

    -

    Before you download and install 300 game 2 apk, you need to make sure that your device meets the minimum requirements and is compatible with the game. Here are the requirements and compatibility details for 300 game 2 apk:

    -
      -
    • The game requires Android 4.0 or higher to run.
    • -
    • The game requires at least 1 GB of RAM and 500 MB of free storage space.
    • -
    • The game is compatible with most Android devices, including smartphones and tablets.
    • -
    • The game is not available on Google Play Store, so you need to download it from a third-party source.
    • -
    -

    Steps to download and install

    -

    Once you have checked the requirements and compatibility of your device, you can follow these steps to download and install 300 game 2 apk:

    -
      -
    1. Go to a reliable website that offers the 300 game 2 apk file for free download. You can search for it on Google or use this link: .
    2. -
    3. Click on the download button and wait for the apk file to be downloaded on your device.
    4. -
    5. Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the apk file that is not from Google Play Store.
    6. -
    7. Go to your file manager and locate the downloaded apk file. Tap on it and follow the instructions to install it on your device.
    8. -
    9. Launch the game from your app drawer or home screen and enjoy!
    10. -
    -

    Tips and tricks to play better

    -

    If you want to play better and get higher scores in 300 game 2 apk, here are some tips and tricks that you can use:

    -
      -
    • Use different weapons for different situations. For example, use the sword for close-range combat, the spear for medium-range combat, and the bow for long-range combat.
    • -
    • Use your shield wisely. It can block incoming attacks and reflect projectiles back at your enemies.
    • -
    • Use your special skills when they are available. They can deal massive damage or give you an advantage in battle.
    • -
    • Avoid getting surrounded by enemies. Move around the battlefield and use your environment to your advantage.
    • -
    • Collect coins and gems as much as possible. They can help you upgrade your weapons and skills.
    • -
    -

    Conclusion

    -

    Summary of the main points

    -

        In conclusion, 300 game 2 apk is an epic action game that takes you to a fresh battlefield - on the sea - where you have to fight against the Persian invasion led by Artemisia. The game features stunning graphics and sound effects, intense and immersive gameplay, multiple levels and challenges, and easy controls and customization. It is free to download and play on your Android device, and you can follow the steps we provided to install it. It is also a faithful adaptation of the movie, with authentic characters, scenes, and dialogues, and it will keep you entertained for hours. If you are a fan of action games, especially those based on historical and mythical events, then you should definitely try 300 game 2 apk.
    

    -

    Call to action and recommendation

    -

    Now that you have learned everything you need to know about 300 game 2 apk, what are you waiting for? Download the game today and join the epic battle on the sea. You will not regret it. You can also share the game with your friends and family and challenge them to see who can get the highest score. You can also leave your feedback and rating for the game on the website where you downloaded it. This will help the developers improve the game and make it even better.

    -

    We hope you enjoyed this article and found it helpful. If you did, please share it with others who might be interested in 300 game 2 apk. You can also check out our other articles on similar topics, such as 300 game 1 apk, 300 movie review, and more. Thank you for reading and have a great day!

    -

    FAQs

    -

    Here are some of the frequently asked questions about 300 game 2 apk:

    -

    Q: Is 300 game 2 apk safe to download and play?

    -

        A: Yes, 300 game 2 apk is safe to download and play, as long as you download it from a reliable website that offers the original, virus-free apk file. You should also scan the apk file with antivirus software before installing it on your device.
    

    -

    Q: Is 300 game 2 apk offline or online?

    -

        A: 300 game 2 apk can be played both offline and online. You can play the story mode and survival mode offline, without an internet connection. You only need an internet connection for the multiplayer mode, where you join other players online.
    

    -

    Q: How can I get more coins and gems in 300 game 2 apk?

    -

    A: You can get more coins and gems in 300 game 2 apk by completing levels, challenges, and achievements in the game. You can also watch ads or make in-app purchases to get more coins and gems.

    -

    Q: How can I contact the developers of 300 game 2 apk?

    -

    A: You can contact the developers of 300 game 2 apk by visiting their official website or social media pages. You can also email them at or call them at . You can also leave your feedback and suggestions for the game on the website where you downloaded it.

    -

    Q: What are some of the best alternatives to 300 game 2 apk?

    -

    A: Some of the best alternatives to 300 game 2 apk are:

    -
      -
    • Gladiator Heroes: A strategy and action game where you build your own empire and fight against other gladiators.
    • -
    • Spartan Wars: A strategy and simulation game where you lead your own army of Spartans and conquer other lands.
    • -
    • God of War: A hack and slash game where you play as Kratos, a former Spartan warrior who seeks revenge against the gods of Olympus.
    • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Discover Blockman Go A Sandbox Game with Millions of Games - Download on Google Play.md b/spaces/congsaPfin/Manga-OCR/logs/Discover Blockman Go A Sandbox Game with Millions of Games - Download on Google Play.md deleted file mode 100644 index d63fb931a604f7236ed839fe24c35b3d58b4d4a5..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Discover Blockman Go A Sandbox Game with Millions of Games - Download on Google Play.md +++ /dev/null @@ -1,106 +0,0 @@ - -

    Blockman Go: A Fun and Creative Sandbox Game for Android

    -

    Do you love playing games that let you explore, build, and share your own worlds? Do you enjoy meeting new friends and having fun together? If you answered yes, then you should try Blockman Go, a free app that offers a wealth of gameplay options for Android users. In this article, we will tell you what Blockman Go is, how to download it from Google Play, how to play it on your device, and why you should play it.

    -

    What is Blockman Go?

    -

        Blockman Go is a free app that combines minigames, chatting, and making friends. You can play various block-style minigames here, such as Bed Wars, Sky Wars, Egg War, Anime Fighting Simulator, and more. Each game has its own rules, objectives, and rewards. You can also create your own games using the built-in editor and share them with other players.
    

    -

    blockman go google download


    Download 🆓 https://urlca.com/2uO7pb



    -

    Blockman Go also has a sandbox mode that lets you craft and design your own world using different blocks and items. You can build anything you can imagine, from houses and castles to cities and landscapes. You can also invite other players to visit your world and interact with them.

    -

    Blockman Go is not only a game, but also a social platform that connects you with millions of players from around the world. You can chat with them using voice or text messages, join groups and clans, customize your avatar and profile, and earn coins and gems that you can use to buy more items and outfits.

    -

    How to download Blockman Go from Google Play?

    -

    Downloading Blockman Go from Google Play is very easy and fast. Just follow these simple steps:

    -

    Step 1: Open Google Play on your device

    -

    Google Play is the official app store for Android devices. You can find it on your home screen or in your app drawer. Tap on it to open it.

    -

    Step 2: Search for Blockman Go in the search bar

    -

    Once you open Google Play, you will see a search bar at the top of the screen. Type "Blockman Go" in the search bar and tap on the magnifying glass icon to start the search.

    -

    Step 3: Tap on the install button and wait for the download to finish

    -

    You will see a list of results related to your search. Look for the one that says "Blockman Go - Apps on Google Play" with a blue icon that has a white B letter inside. Tap on it to open its page. You will see an install button at the right side of the screen. Tap on it to start the download process. Wait for a few minutes until the download is complete.

    -

    How to play Blockman Go on your device?

    -

    Playing Blockman Go on your device is very simple and fun. Just follow these easy steps:

    -

    Step 1: Launch the app and create your account

    -

    After downloading Blockman Go from Google Play, you can launch it by tapping on its icon on your home screen or in your app drawer. The first time you open it, you will need to create your account. You can choose to sign up with your email, phone number, or Facebook account. You will also need to create a username and a password. After that, you will be able to access the main menu of the app.

    -

    blockman go google play store download
    -blockman go apk download for android google
    -blockman go free download on google play
    -blockman go sandbox game download google
    -blockman go minigames google download
    -blockman go bed wars google download
    -blockman go anime all star google download
    -blockman go sky block google download
    -blockman go anime fighting simulator google download
    -blockman go trainers arena google download
    -blockman go build and shoot google download
    -blockman go wwe school simulator google download
    -blockman go egg war google download
    -blockman go free city rp google download
    -blockman go titan google download
    -blockman go jail break google download
    -blockman go frontline shooters google download
    -blockman go tnt tag google download
    -blockman go paradise island google download
    -blockman go ninja skyrim google download
    -blockman go realm city google download
    -blockman go road rash google download
    -blockman go cyberpunk google download
    -blockman go hero tycoon 2 google download
    -blockman go aliens attack google download
    -blockman go horror 1vs4 google download
    -blockman go party street google download
    -blockman go lucky block skywars google download
    -blockman go build at sea google download
    -blockman go build battle google download
    -blockman go block dungeon google download
    -blockman go the walking dead google download
    -blockman go borderline google download
    -blockman go gem knight google download
    -blockman go rainbow parkour google download
    -blockman go ender vs slender google download
    -blockman go bmg football google download
    -blockman go ludo google download
    -blockman go social hall google download
    -blockman go the exorcists google download
    -blockman go glory of hero google download
    -blockman go outlast the dawn google download
    -blockman go jumping holes google download
    -blockman go duel in the forbidden google download
    -garena blockman go apk free download for android from Google Play Store

    -

    Step 2: Customize your character and choose your game mode

    -

    Before you start playing, you can customize your character by changing its appearance, clothes, accessories, and more. You can also buy more items and outfits using the coins and gems that you earn by playing games. To customize your character, tap on the avatar icon at the bottom left corner of the screen.

    -

    After customizing your character, you can choose which game mode you want to play. There are two main modes: minigames and sandbox. To choose a game mode, tap on the game icon at the bottom right corner of the screen. You will see a list of available games that you can join or create. You can also filter the games by category, region, language, and more.

    -

    Step 3: Join a game or create your own world

    -

    If you want to join a game, you can browse the list of games and tap on the one that interests you. You will see the details of the game, such as the name, the creator, the players, the rules, and the rewards. You can also chat with other players in the lobby before the game starts. To join a game, tap on the join button at the bottom of the screen.

    -

    If you want to create your own world, you can tap on the create button at the top of the screen. You will be able to choose from different templates or start from scratch. You can also edit the settings of your world, such as the name, the description, the password, the max players, and more. To create your own world, tap on the confirm button at the bottom of the screen.

    -

    Why should you play Blockman Go?

    -

    Blockman Go is a fun and creative sandbox game for Android that offers many benefits for its players. Here are some of the reasons why you should play Blockman Go:

    -

    It offers a variety of fun and engaging games

    -

    Blockman Go has a wide range of minigames that cater to different tastes and preferences. Whether you like action, adventure, strategy, or simulation games, you will find something that suits you in Blockman Go. You can also challenge yourself by competing with other players and earning rewards.

    -

    It allows you to express your creativity and imagination

    -

        Blockman Go has a sandbox mode that lets you unleash your creativity and imagination by building your own world using different blocks and items. You can also share your world with other players, see what they have created, learn from them, and get inspired by their creations.
    

    -

    It connects you with other players from around the world

    -

    Blockman Go is not only a game, but also a social platform that connects you with millions of players from around the world. You can chat with them using voice or text messages, join groups and clans, customize your avatar and profile, and earn coins and gems that you can use to buy more items and outfits. You can also make new friends and have fun together.

    -

    Conclusion

    -

        Blockman Go is a free Android app that combines minigames, chatting, and making friends. It is a fun and creative sandbox game that lets you explore, build, and share your own worlds, and it is also a social platform that connects you with other players from around the world. If you are looking for a game that offers a wealth of gameplay options, then you should download Blockman Go from Google Play and start playing today.
    

    -

    Frequently Asked Questions

    -

    Q: How do I update Blockman Go?

    -

    A: To update Blockman Go, you need to open Google Play on your device and search for Blockman Go in the search bar. Then, tap on the update button next to Blockman Go and wait for the update to finish.

    -

    Q: How do I report a bug or a problem in Blockman Go?

    -

        A: To report a bug or a problem in Blockman Go, tap on the settings icon at the top right corner of the screen and then tap on feedback. Fill out the form with your email address, your problem description, and any screenshots or videos that can help explain your issue, then tap on submit to send your feedback.
    

    -

    Q: How do I get more coins and gems in Blockman Go?

    -

    

    A: To get more coins and gems in Blockman Go, you can do the following things:

    -
      -
    • Play more games and complete more tasks to earn rewards.
    • -
    • Watch ads and videos to get free coins and gems.
    • -
    • Invite your friends to join Blockman Go and get referral bonuses.
    • -
    • Buy coins and gems using real money through in-app purchases.
    • -
    -

    Q: How do I delete my account in Blockman Go?

    -

    A: To delete your account in Blockman Go, you need to contact the customer service team by sending an email to service@blockmango.net. You will need to provide your username, your email address, and your reason for deleting your account. The customer service team will process your request and delete your account within 24 hours.

    -

    Q: How do I contact the Blockman Go team?

    -

    A: To contact the Blockman Go team, you can use one of the following methods:

    -
      -
    • Email: service@blockmango.net
    • -
    • Facebook: https://www.facebook.com/BlockmanGo
    • -
    • Twitter: https://twitter.com/BlockmanGo
    • -
    • Instagram: https://www.instagram.com/blockmangoofficial
    • -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download X Prem Full Movie in Dual Audio Hindi Bengali - 9xmovies.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download X Prem Full Movie in Dual Audio Hindi Bengali - 9xmovies.md deleted file mode 100644 index 79c9fc8382eaf330d8fb088f89d5ec77926da423..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Download X Prem Full Movie in Dual Audio Hindi Bengali - 9xmovies.md +++ /dev/null @@ -1,117 +0,0 @@ -
    -

    X Prem Full Movie Download 9xmovies: Is It Safe and Legal?

    -

        X Prem is a romantic drama movie directed by Srijit Mukherji and starring Arjun Chakraborty, Shruti Das, and Madhurima Basak. The movie revolves around a young couple, Khilat and Joyee, whose lives are shattered by a tragic incident in which Khilat loses his memories of their love. He resorts to scientific methods to retrieve the lost memories and reunite with her. Will they find their happily ever after?
    

    -

    x prem full movie download 9xmovies


    Download Filehttps://urlca.com/2uObh4



    -

    The movie was released on June 3, 2023 in India and received positive reviews from critics and audiences. The movie is also available on Amazon Prime Video for streaming. However, some people may want to download X Prem full movie for free from illegal websites like 9xmovies. But is it safe and legal to do so? Let's find out in this article.

    -

    What is 9xmovies Website?

    -

    9xmovies is a notorious website that offers free downloads of movies, web series, TV shows, and documentaries in various languages and formats. The website has a huge collection of content from Bollywood, Hollywood, Tollywood, Kollywood, and other regional industries. The website also provides dubbed and dual audio versions of movies for the convenience of the users.

    -

    9xmovies website is very popular among movie lovers who want to watch the latest releases without paying any subscription fees or buying tickets. The website updates its content regularly and provides multiple download options based on the resolutions. The website also has a user-friendly interface and easy navigation.

    -

    x prem full movie download filmywap
    -x prem full movie download tamilrockers
    -x prem full movie download 480p
    -x prem full movie download 720p
    -x prem full movie download 1080p
    -x prem full movie download hoichoi
    -x prem full movie download filmyzilla
    -x prem full movie download bengali
    -x prem full movie download srijit mukherji
    -x prem full movie download dual audio
    -x prem full movie download torrent
    -x prem full movie download hd
    -x prem full movie download free
    -x prem full movie download online
    -x prem full movie download watch online
    -x prem full movie download link
    -x prem full movie download google drive
    -x prem full movie download telegram
    -x prem full movie download mp4
    -x prem full movie download mkv
    -x prem full movie download dvdrip
    -x prem full movie download bluray
    -x prem full movie download filmyhit
    -x prem full movie download khatrimaza
    -x prem full movie download worldfree4u
    -x prem full movie download bolly4u
    -x prem full movie download pagalworld
    -x prem full movie download moviesda
    -x prem full movie download movierulz
    -x prem full movie download isaimini
    -x prem full movie download tamilyogi
    -x prem full movie download tamilgun
    -x prem full movie download filmywap.com
    -x prem full movie download tamilrockers.com
    -x prem full movie download hoichoi.tv
    -x prem full movie download filmyzilla.com
    -x prem full movie download filmyzap.com
    -x prem full movie download realjankari23.com
    -x=prem 2022 bengali romantic drama film directed by srijit mukherji
    -richard bhakti klein shruti das anindya sengupta madhurima basak arjun chakraborty star in the lead roles of the film

    -

    How to Download X Prem Full Movie from 9xmovies?

    -

    If you want to download X Prem full movie from 9xmovies website, you need to follow these steps:

    -
      -
    1. Visit the official website of 9xmovies or any of its proxy or mirror sites.
    2. -
    3. Search for X Prem movie in the search bar or browse through the categories.
    4. -
    5. Select the movie from the search results and click on it.
    6. -
    7. Scroll down to the bottom of the page and find the download links.
    8. -
    9. Choose your preferred download option based on the resolution and click on it.
    10. -
    11. Wait for a few seconds until the download link is generated.
    12. -
    13. Click on the download link and save the file to your device.
    14. -
    -

    Congratulations! You have successfully downloaded X Prem full movie from 9xmovies website.

    -

    Is It Safe and Legal to Download X Prem Full Movie from 9xmovies?

    -

        The answer is NO. It is neither safe nor legal to download X Prem full movie from 9xmovies website. Here are some of the reasons why you should avoid it:
    

    -
      -
    • Malware and Viruses: 9xmovies website may contain malicious ads, pop-ups, redirects, and links that can infect your device with malware and viruses. These can harm your device's performance, security, and privacy. They can also steal your personal information, such as passwords, bank details, credit card numbers, etc.
    • -
    • Legal Actions: 9xmovies website violates the copyright laws and piracy regulations of India and other countries. The makers and distributors of X Prem movie have the legal rights to the movie and its distribution. Downloading X Prem full movie from 9xmovies website is an act of piracy and can land you in legal trouble. You may face fines, penalties, or even imprisonment for violating the law.
    • -
    • Ethical Issues: 9xmovies website harms the film industry and the livelihoods of the people involved in it. The movie makers and actors invest a lot of time, money, and effort to create quality content for the audience. Downloading X Prem full movie from 9xmovies website deprives them of their rightful earnings and recognition. It also discourages them from making more movies in the future. By downloading X Prem full movie from 9xmovies website, you are supporting piracy and disrespecting the hard work of the creators.
    • -
    -

    Therefore, it is better to avoid downloading X Prem full movie from 9xmovies website and watch it legally and safely from authorized sources.

    -

    What are the Alternatives to 9xmovies Website?

    -

    If you want to watch X Prem full movie online, you don't need to resort to illegal websites like 9xmovies. There are many legal and safe alternatives to 9xmovies website that offer high-quality streaming and downloading services. Some of them are:

        | Name | Type | Price | Features |
        | --- | --- | --- | --- |
        | Amazon Prime Video | Streaming Platform | $12.99 per month or $119 per year | Unlimited access to movies, TV shows, web series, and documentaries; ad-free and HD quality streaming; offline download option; exclusive original content; free delivery and other benefits for Prime members. |
        | X Prem Official Website | Official Website | Free | Access to the X Prem movie trailer, songs, behind-the-scenes material, and other information; links to authorized streaming and downloading platforms; updates on X Prem movie news and events. |
        | X Prem Authorized Distributors | Authorized Distributors | Varies depending on the distributor | Access to X Prem movie in different languages and formats; legal and safe download option; support for the film industry and the creators. |
    

    These are some of the alternatives to 9xmovies website that you can use to watch X Prem full movie online legally and safely. You can also check out other streaming platforms, official websites, and authorized distributors that offer X Prem movie online.

    -

    Conclusion

    -

    X Prem is a romantic drama movie that tells the story of a young couple who face a tragic incident that erases the memory of love. The movie is directed by Srijit Mukherji and stars Arjun Chakraborty, Shruti Das, and Madhurima Basak. The movie was released on June 3, 2023 in India and is also available on Amazon Prime Video for streaming.

    -

    However, some people may want to download X Prem full movie for free from illegal websites like 9xmovies. But this is not a safe or legal option as it can expose you to malware, viruses, legal actions, and ethical issues. It can also harm the film industry and the livelihoods of the people involved in it.

    -

    Therefore, it is better to avoid downloading X Prem full movie from 9xmovies website and watch it legally and safely from authorized sources. You can use streaming platforms, official websites, or authorized distributors to watch X Prem full movie online. This way, you can enjoy the movie without any risks or consequences.

    -

    FAQs

    -

    Here are some frequently asked questions and answers related to X Prem full movie download 9xmovies:

    -

    Q: Is X Prem movie based on a true story?

    -

    A: No, X Prem movie is not based on a true story. It is a fictional story written by Srijit Mukherji.

    -

    Q: Who composed the music for X Prem movie?

    -

    A: The music for X Prem movie was composed by Anupam Roy.

    -

    Q: How long is X Prem movie?

    -

    A: X Prem movie has a runtime of 2 hours and 10 minutes.

    -

    Q: Where can I watch X Prem movie with subtitles?

    -

        A: You can watch X Prem movie with subtitles on Amazon Prime Video or other streaming platforms that offer subtitle options.
    
        Q: What is the rating of X Prem movie?
    
        -
    

    A: X Prem movie has a rating of 7.8 out of 10 on IMDb and 4 out of 5 on Times of India.

    -

    Q: Is X Prem movie suitable for children?

    -

    A: X Prem movie is rated U/A by the Central Board of Film Certification, which means that it is suitable for viewers of all ages, but parental guidance is advised for children below the age of 12.

    -

    I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and have a great day!

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/3DMGAME Mortal Kombat Komplete Edition Update 1 and Crack By 3DM Download and Play the Ultimate Fighting Game.md b/spaces/contluForse/HuggingGPT/assets/3DMGAME Mortal Kombat Komplete Edition Update 1 and Crack By 3DM Download and Play the Ultimate Fighting Game.md deleted file mode 100644 index f72c2abf23d0a01c6d43979ddd2482b03b103801..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/3DMGAME Mortal Kombat Komplete Edition Update 1 and Crack By 3DM Download and Play the Ultimate Fighting Game.md +++ /dev/null @@ -1,6 +0,0 @@ -

    3DMGAME Mortal Kombat Komplete Edition Update 1 and Crack By 3DM the game


    DOWNLOAD - https://ssurll.com/2uzyRR



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py deleted file mode 100644 index b45e758ac6cf8dfb0382d072fe09125bc7e9b888..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -from torch import nn -from torch.nn import functional as F - -from .registry import CONV_LAYERS - - -@CONV_LAYERS.register_module() -class Conv2dAdaptivePadding(nn.Conv2d): - """Implementation of 2D convolution in tensorflow with `padding` as "same", - which applies padding to input (if needed) so that input image gets fully - covered by filter and stride you specified. For stride 1, this will ensure - that output image size is same as input. For stride of 2, output dimensions - will be half, for example. - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 0 - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 1 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If ``True``, adds a learnable bias to the - output. Default: ``True`` - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True): - super().__init__(in_channels, out_channels, kernel_size, stride, 0, - dilation, groups, bias) - - def forward(self, x): - img_h, img_w = x.size()[-2:] - kernel_h, kernel_w = self.weight.size()[-2:] - stride_h, stride_w = self.stride - output_h = math.ceil(img_h / stride_h) - output_w = math.ceil(img_w / stride_w) - pad_h = ( - max((output_h - 1) * self.stride[0] + - (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)) - pad_w = ( - max((output_w - 1) * self.stride[1] + - (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)) - if pad_h > 0 or pad_w > 0: - x = F.pad(x, [ - pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 - ]) - return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, - self.dilation, self.groups) diff --git a/spaces/d0r1h/LegSum/app.py b/spaces/d0r1h/LegSum/app.py deleted file mode 100644 index a3001b7f15bcf9a7fefb59cfea33562cd248f89e..0000000000000000000000000000000000000000 --- a/spaces/d0r1h/LegSum/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr -from Summarizer.Extractive import summarize - -description = """ -
        -LegSum is a tool to summarize legal proceedings such as judgments and bills using classical and SOTA models -
    
    -""" -article="

    Created by Pawan Trivedi 2022 | GitHub

    " - -interface = gr.Interface(fn = summarize, - inputs = [gr.inputs.File(), - gr.inputs.Radio(['Pegasus', 'Distill' , 'LEDBill', 'ILC', 'BERT', 'Lsa', 'SumBasic', 'TextRank'], type="value", label='Model')], - outputs = "text", - title = "LegSum", - description=description, - article = article) - -interface.launch(debug=True, - enable_queue=True) \ No newline at end of file diff --git a/spaces/datasciencedojo/Finger-Counting-Right-Hand/app.py b/spaces/datasciencedojo/Finger-Counting-Right-Hand/app.py deleted file mode 100644 index 8309c3f73e79029966496b238af1197306110298..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/Finger-Counting-Right-Hand/app.py +++ /dev/null @@ -1,231 +0,0 @@ -import cv2 -import time -import os -import mediapipe as mp -import gradio as gr -from threading import Thread -#from cvzone.HandTrackingModule import HandDetector -example_flag = False - -class handDetector(): - def __init__(self, mode=True, modelComplexity=1, maxHands=2, detectionCon=0.5, trackCon=0.5): - self.mode = mode - self.maxHands = maxHands - self.detectionCon = detectionCon - self.modelComplex = modelComplexity - self.trackCon = trackCon - self.mpHands = mp.solutions.hands - self.hands = self.mpHands.Hands(self.mode, self.maxHands,self.modelComplex,self.detectionCon, self.trackCon) - self.mpDraw = mp.solutions.drawing_utils - - def findHands(self, img, draw=True,flipType=True): - """ - Finds hands in a BGR image. - :param img: Image to find the hands in. - :param draw: Flag to draw the output on the image. - :return: Image with or without drawings - """ - imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - #cv2.imshow('test',imgRGB) - self.results = self.hands.process(imgRGB) - allHands = [] - h, w, c = img.shape - if self.results.multi_hand_landmarks: - for handType, handLms in zip(self.results.multi_handedness, self.results.multi_hand_landmarks): - myHand = {} - ## lmList - mylmList = [] - xList = [] - yList = [] - for id, lm in enumerate(handLms.landmark): - px, py, pz = int(lm.x * w), int(lm.y * h), int(lm.z * w) - mylmList.append([px, py, pz]) - xList.append(px) - yList.append(py) - - ## bbox - xmin, xmax = min(xList), max(xList) - ymin, ymax = min(yList), max(yList) - boxW, boxH = xmax - xmin, ymax - ymin - bbox = xmin, ymin, boxW, boxH - cx, cy = bbox[0] + (bbox[2] // 2), \ - bbox[1] + (bbox[3] // 2) - - myHand["lmList"] = mylmList - myHand["bbox"] = bbox - myHand["center"] = (cx, cy) - - if flipType: - if handType.classification[0].label == "Right": - myHand["type"] = "Left" - else: - myHand["type"] = "Right" - else: - myHand["type"] = handType.classification[0].label - allHands.append(myHand) - - ## draw - if draw: - self.mpDraw.draw_landmarks(img, handLms, - self.mpHands.HAND_CONNECTIONS) - cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20), - (bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20), - (255, 0, 255), 2) - #cv2.putText(img, myHand["type"], (bbox[0] - 30, bbox[1] - 30), cv2.FONT_HERSHEY_PLAIN,2, (255, 0, 255), 2) - if draw: - return allHands, img - else: - return allHands - def findPosition(self, img, handNo=0, draw=True,flipType=False): - - lmList = [] - if self.results.multi_hand_landmarks: - myHand = self.results.multi_hand_landmarks[handNo] - for id, lm in enumerate(myHand.landmark): - # print(id, lm) - h, w, c = img.shape - cx, cy = int(lm.x * w), int(lm.y * h) - # print(id, cx, cy) - lmList.append([id, cx, cy]) - if draw: - cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED) - return lmList - - - - -def set_example_image(example: list) -> dict: - return 
gr.inputs.Image.update(value=example[0]) - - -def count(im): - folderPath = "Count" - myList = os.listdir(folderPath) - overlayList = [] - for imPath in sorted(myList): - image = cv2.imread(f'{folderPath}/{imPath}') - # print(f'{folderPath}/{imPath}') - overlayList.append(image) - - #print(len(overlayList)) - tipIds = [4, 8, 12, 16, 20] - detector = handDetector(detectionCon=0.75) - - #img = cv2.imread('test.jpg') - allhands,img = detector.findHands(cv2.flip(im[:,:,::-1], 1)) - cv2.imwrite('test3.png',img) - - lmList = detector.findPosition(img, draw=False,) - # print(lmList) - - if len(lmList) != 0: - fingers = [] - - # Thumb - if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]: - fingers.append(1) - else: - fingers.append(0) - - # 4 Fingers - for id in range(1, 5): - if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]: - fingers.append(1) - else: - fingers.append(0) - - # print(fingers) - totalFingers = fingers.count(1) - #print(totalFingers) - text = f"Total finger count is {totalFingers}!" - - h, w, c = overlayList[totalFingers - 1].shape - img = cv2.flip(img,1) - img[0:h, 0:w] = overlayList[totalFingers - 1] - - - cv2.rectangle(img, (20, 225), (170, 425), (0, 255, 0), cv2.FILLED) - cv2.putText(img, str(totalFingers), (45, 375), cv2.FONT_HERSHEY_PLAIN, - 10, (255, 0, 0), 25) - return img[:,:,::-1] - else: - return cv2.flip(img[:,:,::-1],1) - -css = """ -.gr-button-lg { - z-index: 14; - width: 113px; - height: 30px; - left: 0px; - top: 0px; - padding: 0px; - cursor: pointer !important; - background: none rgb(17, 20, 45) !important; - border: none !important; - text-align: center !important; - font-size: 14px !important; - font-weight: 500 !important; - color: rgb(255, 255, 255) !important; - line-height: 1 !important; - border-radius: 6px !important; - transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important; - box-shadow: none !important; -} -.gr-button-lg:hover{ - z-index: 14; - width: 113px; - height: 30px; - left: 0px; - top: 0px; - padding: 0px; - cursor: pointer !important; - background: none rgb(37, 56, 133) !important; - border: none !important; - text-align: center !important; - font-size: 14px !important; - font-weight: 500 !important; - color: rgb(255, 255, 255) !important; - line-height: 1 !important; - border-radius: 6px !important; - transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important; - box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important; -} - -footer {display:none !important} -.output-markdown{display:none !important} -#out_image {height: 22rem !important;} - -""" - -with gr.Blocks(title="Right Hand Finger Counting | Data Science Dojo", css=css) as demo: - with gr.Tabs(): - with gr.TabItem('Upload'): - with gr.Row(): - with gr.Column(): - img_input = gr.Image(shape=(640,480)) - image_button = gr.Button("Submit") - - with gr.Column(): - output = gr.Image(shape=(640,480), elem_id="out_image") - with gr.Row(): - example_images = gr.Dataset(components=[img_input],samples=[["ex2.jpg"]]) - - with gr.TabItem('Webcam'): - with gr.Row(): - with gr.Column(): - img_input2 = gr.Webcam() - image_button2 = gr.Button("Submit") - - with gr.Column(): - output2 = gr.outputs.Image() - - image_button.click(fn=count, - inputs = img_input, - outputs = output) - image_button2.click(fn=count, - inputs = img_input2, - outputs = output2) - example_images.click(fn=set_example_image,inputs=[example_images],outputs=[img_input]) - - -demo.launch(debug=True) \ No newline at end of file diff --git 
a/spaces/davda54/chat-nort5/question_detection_norbert3_small/modeling_norbert.py b/spaces/davda54/chat-nort5/question_detection_norbert3_small/modeling_norbert.py deleted file mode 100644 index c802871db6175aa9560eb1c72a732c0297e5751f..0000000000000000000000000000000000000000 --- a/spaces/davda54/chat-nort5/question_detection_norbert3_small/modeling_norbert.py +++ /dev/null @@ -1,657 +0,0 @@ -import math -from typing import List, Optional, Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils import checkpoint - -from configuration_norbert import NorbertConfig -from transformers.modeling_utils import PreTrainedModel -from transformers.activations import gelu_new -from transformers.modeling_outputs import ( - MaskedLMOutput, - MultipleChoiceModelOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, - BaseModelOutput -) -from transformers.pytorch_utils import softmax_backward_data - - -class Encoder(nn.Module): - def __init__(self, config, activation_checkpointing=False): - super().__init__() - self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.num_hidden_layers)]) - - for i, layer in enumerate(self.layers): - layer.mlp.mlp[1].weight.data *= math.sqrt(1.0 / (2.0 * (1 + i))) - layer.mlp.mlp[-2].weight.data *= math.sqrt(1.0 / (2.0 * (1 + i))) - - self.activation_checkpointing = activation_checkpointing - - def forward(self, hidden_states, attention_mask, relative_embedding): - hidden_states, attention_probs = [hidden_states], [] - - for layer in self.layers: - if self.activation_checkpointing: - hidden_state, attention_p = checkpoint.checkpoint(layer, hidden_states[-1], attention_mask, relative_embedding) - else: - hidden_state, attention_p = layer(hidden_states[-1], attention_mask, relative_embedding) - - hidden_states.append(hidden_state) - attention_probs.append(attention_p) - - return hidden_states, attention_probs - - -class MaskClassifier(nn.Module): - def __init__(self, config, subword_embedding): - super().__init__() - self.nonlinearity = nn.Sequential( - nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False), - nn.Linear(config.hidden_size, config.hidden_size), - nn.GELU(), - nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False), - nn.Dropout(config.hidden_dropout_prob), - nn.Linear(subword_embedding.size(1), subword_embedding.size(0)) - ) - self.initialize(config.hidden_size, subword_embedding) - - def initialize(self, hidden_size, embedding): - std = math.sqrt(2.0 / (5.0 * hidden_size)) - nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std) - self.nonlinearity[-1].weight = embedding - self.nonlinearity[1].bias.data.zero_() - self.nonlinearity[-1].bias.data.zero_() - - def forward(self, x, masked_lm_labels=None): - if masked_lm_labels is not None: - x = torch.index_select(x.flatten(0, 1), 0, torch.nonzero(masked_lm_labels.flatten() != -100).squeeze()) - x = self.nonlinearity(x) - return x - - -class EncoderLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.attention = Attention(config) - self.mlp = FeedForward(config) - - def forward(self, x, padding_mask, relative_embedding): - attention_output, attention_probs = self.attention(x, padding_mask, relative_embedding) - x = x + attention_output - x = x + self.mlp(x) - return x, attention_probs - - -class GeGLU(nn.Module): - def forward(self, x): - x, gate = x.chunk(2, dim=-1) - x = x * gelu_new(gate) - return x - - 
-class FeedForward(nn.Module): - def __init__(self, config): - super().__init__() - self.mlp = nn.Sequential( - nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=False), - nn.Linear(config.hidden_size, 2*config.intermediate_size, bias=False), - GeGLU(), - nn.LayerNorm(config.intermediate_size, eps=config.layer_norm_eps, elementwise_affine=False), - nn.Linear(config.intermediate_size, config.hidden_size, bias=False), - nn.Dropout(config.hidden_dropout_prob) - ) - self.initialize(config.hidden_size) - - def initialize(self, hidden_size): - std = math.sqrt(2.0 / (5.0 * hidden_size)) - nn.init.trunc_normal_(self.mlp[1].weight, mean=0.0, std=std, a=-2*std, b=2*std) - nn.init.trunc_normal_(self.mlp[-2].weight, mean=0.0, std=std, a=-2*std, b=2*std) - - def forward(self, x): - return self.mlp(x) - - -class MaskedSoftmax(torch.autograd.Function): - @staticmethod - def forward(self, x, mask, dim): - self.dim = dim - x.masked_fill_(mask, float('-inf')) - x = torch.softmax(x, self.dim) - x.masked_fill_(mask, 0.0) - self.save_for_backward(x) - return x - - @staticmethod - def backward(self, grad_output): - output, = self.saved_tensors - input_grad = softmax_backward_data(self, grad_output, output, self.dim, output) - return input_grad, None, None - - -class Attention(nn.Module): - def __init__(self, config): - super().__init__() - - self.config = config - - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError(f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}") - - self.hidden_size = config.hidden_size - self.num_heads = config.num_attention_heads - self.head_size = config.hidden_size // config.num_attention_heads - - self.in_proj_qk = nn.Linear(config.hidden_size, 2*config.hidden_size, bias=True) - self.in_proj_v = nn.Linear(config.hidden_size, config.hidden_size, bias=True) - self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True) - - self.pre_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False) - self.post_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True) - - position_indices = torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(1) \ - - torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(0) - position_indices = self.make_log_bucket_position(position_indices, config.position_bucket_size, config.max_position_embeddings) - position_indices = config.position_bucket_size - 1 + position_indices - self.register_buffer("position_indices", position_indices, persistent=True) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.scale = 1.0 / math.sqrt(3 * self.head_size) - self.initialize() - - def make_log_bucket_position(self, relative_pos, bucket_size, max_position): - sign = torch.sign(relative_pos) - mid = bucket_size // 2 - abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, torch.abs(relative_pos).clamp(max=max_position - 1)) - log_pos = torch.ceil(torch.log(abs_pos / mid) / math.log((max_position-1) / mid) * (mid - 1)).int() + mid - bucket_pos = torch.where(abs_pos <= mid, relative_pos, log_pos * sign).long() - return bucket_pos - - def initialize(self): - std = math.sqrt(2.0 / (5.0 * self.hidden_size)) - nn.init.trunc_normal_(self.in_proj_qk.weight, mean=0.0, std=std, a=-2*std, b=2*std) - nn.init.trunc_normal_(self.in_proj_v.weight, mean=0.0, std=std, a=-2*std, b=2*std) - 
nn.init.trunc_normal_(self.out_proj.weight, mean=0.0, std=std, a=-2*std, b=2*std) - self.in_proj_qk.bias.data.zero_() - self.in_proj_v.bias.data.zero_() - self.out_proj.bias.data.zero_() - - def compute_attention_scores(self, hidden_states, relative_embedding): - key_len, batch_size, _ = hidden_states.size() - query_len = key_len - - if self.position_indices.size(0) < query_len: - position_indices = torch.arange(query_len, dtype=torch.long).unsqueeze(1) \ - - torch.arange(query_len, dtype=torch.long).unsqueeze(0) - position_indices = self.make_log_bucket_position(position_indices, self.position_bucket_size, 512) - position_indices = self.position_bucket_size - 1 + position_indices - self.position_indices = position_indices.to(hidden_states.device) - - hidden_states = self.pre_layer_norm(hidden_states) - - query, key = self.in_proj_qk(hidden_states).chunk(2, dim=2) # shape: [T, B, D] - value = self.in_proj_v(hidden_states) # shape: [T, B, D] - - query = query.reshape(query_len, batch_size * self.num_heads, self.head_size).transpose(0, 1) - key = key.reshape(key_len, batch_size * self.num_heads, self.head_size).transpose(0, 1) - value = value.view(key_len, batch_size * self.num_heads, self.head_size).transpose(0, 1) - - attention_scores = torch.bmm(query, key.transpose(1, 2) * self.scale) - - pos = self.in_proj_qk(self.dropout(relative_embedding)) # shape: [2T-1, 2D] - query_pos, key_pos = pos.view(-1, self.num_heads, 2*self.head_size).chunk(2, dim=2) - query = query.view(batch_size, self.num_heads, query_len, self.head_size) - key = key.view(batch_size, self.num_heads, query_len, self.head_size) - - attention_c_p = torch.einsum("bhqd,khd->bhqk", query, key_pos.squeeze(1) * self.scale) - attention_p_c = torch.einsum("bhkd,qhd->bhqk", key * self.scale, query_pos.squeeze(1)) - - position_indices = self.position_indices[:query_len, :key_len].expand(batch_size, self.num_heads, -1, -1) - attention_c_p = attention_c_p.gather(3, position_indices) - attention_p_c = attention_p_c.gather(2, position_indices) - - attention_scores = attention_scores.view(batch_size, self.num_heads, query_len, key_len) - attention_scores.add_(attention_c_p) - attention_scores.add_(attention_p_c) - - return attention_scores, value - - def compute_output(self, attention_probs, value): - attention_probs = self.dropout(attention_probs) - context = torch.bmm(attention_probs.flatten(0, 1), value) # shape: [B*H, Q, D] - context = context.transpose(0, 1).reshape(context.size(1), -1, self.hidden_size) # shape: [Q, B, H*D] - context = self.out_proj(context) - context = self.post_layer_norm(context) - context = self.dropout(context) - return context - - def forward(self, hidden_states, attention_mask, relative_embedding): - attention_scores, value = self.compute_attention_scores(hidden_states, relative_embedding) - attention_probs = MaskedSoftmax.apply(attention_scores, attention_mask, -1) - return self.compute_output(attention_probs, value), attention_probs.detach() - - -class Embedding(nn.Module): - def __init__(self, config): - super().__init__() - self.hidden_size = config.hidden_size - - self.word_embedding = nn.Embedding(config.vocab_size, config.hidden_size) - self.word_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=False) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - self.relative_embedding = nn.Parameter(torch.empty(2 * config.position_bucket_size - 1, config.hidden_size)) - self.relative_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - 
self.initialize() - - def initialize(self): - std = math.sqrt(2.0 / (5.0 * self.hidden_size)) - nn.init.trunc_normal_(self.relative_embedding, mean=0.0, std=std, a=-2*std, b=2*std) - nn.init.trunc_normal_(self.word_embedding.weight, mean=0.0, std=std, a=-2*std, b=2*std) - - def forward(self, input_ids): - word_embedding = self.dropout(self.word_layer_norm(self.word_embedding(input_ids))) - relative_embeddings = self.relative_layer_norm(self.relative_embedding) - return word_embedding, relative_embeddings - - -# -# HuggingFace wrappers -# - -class NorbertPreTrainedModel(PreTrainedModel): - config_class = NorbertConfig - base_model_prefix = "norbert3" - supports_gradient_checkpointing = True - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, Encoder): - module.activation_checkpointing = value - - def _init_weights(self, module): - pass # everything is already initialized - - -class NorbertModel(NorbertPreTrainedModel): - def __init__(self, config, add_mlm_layer=False): - super().__init__(config) - self.config = config - - self.embedding = Embedding(config) - self.transformer = Encoder(config, activation_checkpointing=False) - self.classifier = MaskClassifier(config, self.embedding.word_embedding.weight) if add_mlm_layer else None - - def get_input_embeddings(self): - return self.embedding.word_embedding - - def set_input_embeddings(self, value): - self.embedding.word_embedding = value - - def get_contextualized_embeddings( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None - ) -> List[torch.Tensor]: - if input_ids is not None: - input_shape = input_ids.size() - else: - raise ValueError("You have to specify input_ids") - - batch_size, seq_length = input_shape - device = input_ids.device - - if attention_mask is None: - attention_mask = torch.zeros(batch_size, seq_length, dtype=torch.bool, device=device) - else: - attention_mask = ~attention_mask.bool() - attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) - - static_embeddings, relative_embedding = self.embedding(input_ids.t()) - contextualized_embeddings, attention_probs = self.transformer(static_embeddings, attention_mask, relative_embedding) - contextualized_embeddings = [e.transpose(0, 1) for e in contextualized_embeddings] - last_layer = contextualized_embeddings[-1] - contextualized_embeddings = [contextualized_embeddings[0]] + [ - contextualized_embeddings[i] - contextualized_embeddings[i - 1] - for i in range(1, len(contextualized_embeddings)) - ] - return last_layer, contextualized_embeddings, attention_probs - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor], BaseModelOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask) - - if not return_dict: - return ( - sequence_output, - *([contextualized_embeddings] if output_hidden_states else []), - *([attention_probs] if output_attentions else []) - ) - - return BaseModelOutput( - last_hidden_state=sequence_output, - hidden_states=contextualized_embeddings if output_hidden_states else None, - 
attentions=attention_probs if output_attentions else None - ) - - -class NorbertForMaskedLM(NorbertModel): - _keys_to_ignore_on_load_unexpected = ["head"] - - def __init__(self, config): - super().__init__(config, add_mlm_layer=True) - - def get_output_embeddings(self): - return self.classifier.nonlinearity[-1].weight - - def set_output_embeddings(self, new_embeddings): - self.classifier.nonlinearity[-1].weight = new_embeddings - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: Optional[torch.LongTensor] = None, - ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask) - subword_prediction = self.classifier(sequence_output) - subword_prediction[:, :, :106+1] = float("-inf") - - masked_lm_loss = None - if labels is not None: - masked_lm_loss = F.cross_entropy(subword_prediction.flatten(0, 1), labels.flatten()) - - if not return_dict: - output = ( - subword_prediction, - *([contextualized_embeddings] if output_hidden_states else []), - *([attention_probs] if output_attentions else []) - ) - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - - return MaskedLMOutput( - loss=masked_lm_loss, - logits=subword_prediction, - hidden_states=contextualized_embeddings if output_hidden_states else None, - attentions=attention_probs if output_attentions else None - ) - - -class Classifier(nn.Module): - def __init__(self, config, num_labels: int): - super().__init__() - - drop_out = getattr(config, "cls_dropout", None) - drop_out = config.hidden_dropout_prob if drop_out is None else drop_out - - self.nonlinearity = nn.Sequential( - nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False), - nn.Linear(config.hidden_size, config.hidden_size), - nn.GELU(), - nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False), - nn.Dropout(drop_out), - nn.Linear(config.hidden_size, num_labels) - ) - self.initialize(config.hidden_size) - - def initialize(self, hidden_size): - std = math.sqrt(2.0 / (5.0 * hidden_size)) - nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std) - nn.init.trunc_normal_(self.nonlinearity[-1].weight, mean=0.0, std=std, a=-2*std, b=2*std) - self.nonlinearity[1].bias.data.zero_() - self.nonlinearity[-1].bias.data.zero_() - - def forward(self, x): - x = self.nonlinearity(x) - return x - - -class NorbertForSequenceClassification(NorbertModel): - _keys_to_ignore_on_load_unexpected = ["classifier"] - _keys_to_ignore_on_load_missing = ["head"] - - def __init__(self, config): - super().__init__(config, add_mlm_layer=False) - - self.num_labels = config.num_labels - self.head = Classifier(config, self.num_labels) - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: Optional[torch.LongTensor] = 
None, - ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask) - logits = self.head(sequence_output[:, 0, :]) - - loss = None - if labels is not None: - if self.config.problem_type is None: - if self.num_labels == 1: - self.config.problem_type = "regression" - elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): - self.config.problem_type = "single_label_classification" - else: - self.config.problem_type = "multi_label_classification" - - if self.config.problem_type == "regression": - loss_fct = nn.MSELoss() - if self.num_labels == 1: - loss = loss_fct(logits.squeeze(), labels.squeeze()) - else: - loss = loss_fct(logits, labels) - elif self.config.problem_type == "single_label_classification": - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - elif self.config.problem_type == "multi_label_classification": - loss_fct = nn.BCEWithLogitsLoss() - loss = loss_fct(logits, labels) - - if not return_dict: - output = ( - logits, - *([contextualized_embeddings] if output_hidden_states else []), - *([attention_probs] if output_attentions else []) - ) - return ((loss,) + output) if loss is not None else output - - return SequenceClassifierOutput( - loss=loss, - logits=logits, - hidden_states=contextualized_embeddings if output_hidden_states else None, - attentions=attention_probs if output_attentions else None - ) - - -class NorbertForTokenClassification(NorbertModel): - _keys_to_ignore_on_load_unexpected = ["classifier"] - _keys_to_ignore_on_load_missing = ["head"] - - def __init__(self, config): - super().__init__(config, add_mlm_layer=False) - - self.num_labels = config.num_labels - self.head = Classifier(config, self.num_labels) - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: Optional[torch.LongTensor] = None, - ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask) - logits = self.head(sequence_output) - - loss = None - if labels is not None: - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - if not return_dict: - output = ( - logits, - *([contextualized_embeddings] if output_hidden_states else []), - *([attention_probs] if output_attentions else []) - ) - return ((loss,) + output) if loss is not None else output - - return TokenClassifierOutput( - loss=loss, - logits=logits, - hidden_states=contextualized_embeddings if output_hidden_states else None, - attentions=attention_probs if output_attentions else None - ) - - -class NorbertForQuestionAnswering(NorbertModel): - _keys_to_ignore_on_load_unexpected = ["classifier"] - _keys_to_ignore_on_load_missing = ["head"] - - def __init__(self, config): - super().__init__(config, add_mlm_layer=False) - - self.num_labels = config.num_labels - self.head = Classifier(config, 
self.num_labels) - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - start_positions: Optional[torch.Tensor] = None, - end_positions: Optional[torch.Tensor] = None - ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask) - logits = self.head(sequence_output) - - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1).contiguous() - end_logits = end_logits.squeeze(-1).contiguous() - - total_loss = None - if start_positions is not None and end_positions is not None: - # If we are on multi-GPU, split add a dimension - if len(start_positions.size()) > 1: - start_positions = start_positions.squeeze(-1) - if len(end_positions.size()) > 1: - end_positions = end_positions.squeeze(-1) - - # sometimes the start/end positions are outside our model inputs, we ignore these terms - ignored_index = start_logits.size(1) - start_positions = start_positions.clamp(0, ignored_index) - end_positions = end_positions.clamp(0, ignored_index) - - loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index) - start_loss = loss_fct(start_logits, start_positions) - end_loss = loss_fct(end_logits, end_positions) - total_loss = (start_loss + end_loss) / 2 - - if not return_dict: - output = ( - start_logits, - end_logits, - *([contextualized_embeddings] if output_hidden_states else []), - *([attention_probs] if output_attentions else []) - ) - return ((total_loss,) + output) if total_loss is not None else output - - return QuestionAnsweringModelOutput( - loss=total_loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=contextualized_embeddings if output_hidden_states else None, - attentions=attention_probs if output_attentions else None - ) - - -class NorbertForMultipleChoice(NorbertModel): - _keys_to_ignore_on_load_unexpected = ["classifier"] - _keys_to_ignore_on_load_missing = ["head"] - - def __init__(self, config): - super().__init__(config, add_mlm_layer=False) - - self.num_labels = getattr(config, "num_labels", 2) - self.head = Classifier(config, self.num_labels) - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None - ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - num_choices = input_ids.shape[1] - - flat_input_ids = input_ids.view(-1, input_ids.size(-1)) - flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None - - sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(flat_input_ids, flat_attention_mask) - logits = self.head(sequence_output) - reshaped_logits = logits.view(-1, num_choices) - - loss = None - if labels is not None: 
- loss_fct = nn.CrossEntropyLoss() - loss = loss_fct(reshaped_logits, labels) - - if not return_dict: - output = ( - reshaped_logits, - *([contextualized_embeddings] if output_hidden_states else []), - *([attention_probs] if output_attentions else []) - ) - return ((loss,) + output) if loss is not None else output - - return MultipleChoiceModelOutput( - loss=loss, - logits=reshaped_logits, - hidden_states=contextualized_embeddings if output_hidden_states else None, - attentions=attention_probs if output_attentions else None - ) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/altair/vegalite/api.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/altair/vegalite/api.py deleted file mode 100644 index 6602986fe9c617eb5f4e375c94985260a2773aaa..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/altair/vegalite/api.py +++ /dev/null @@ -1,2 +0,0 @@ -# ruff: noqa -from .v5.api import * diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_B_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_B_.py deleted file mode 100644 index 8a6c14c444595508c35bdc6ebace60b4bbbbdaba..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_B_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .T_S_I_V_ import table_T_S_I_V_ - - -class table_T_S_I_B_(table_T_S_I_V_): - pass diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Image-003ee87c.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Image-003ee87c.css deleted file mode 100644 index 60f45635043d082881d8d8a529c1142ee028a68b..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Image-003ee87c.css +++ /dev/null @@ -1 +0,0 @@ -img.svelte-gqt00k{border-radius:var(--radius-lg);max-width:none}img.selected.svelte-gqt00k{border-color:var(--border-color-accent)}.table.svelte-gqt00k{margin:0 auto;border:2px solid var(--border-color-primary);border-radius:var(--radius-lg);width:var(--size-20);height:var(--size-20);object-fit:cover}.gallery.svelte-gqt00k{border:2px solid var(--border-color-primary);max-height:var(--size-20);object-fit:cover} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-7045bfe3.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-7045bfe3.js deleted file mode 100644 index d103e95b282c9b2037caf03c1b14b261ac207958..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-7045bfe3.js +++ /dev/null @@ -1,13 +0,0 @@ -const VERSION_RE = new RegExp("3.40.1/", "g");function import_fix(mod, base) {const url = new URL(mod, base); return import(`https://gradio.s3-us-west-2.amazonaws.com/3.40.1/${url.pathname?.startsWith('/') ? 
url.pathname.substring(1).replace(VERSION_RE, "") : url.pathname.replace(VERSION_RE, "")}`);}import{S as kt,e as vt,s as xt,f as oo,g as se,h as Ce,j as ei,n as Ci,k as Ae,_ as Pe,m as dt,C as Mr,D as $c,N as Rt,F as oe,o as pt,Y as vn,G as le,p as ta,w as F,r as Wn,u as W,v as zn,H as ae,ai as ia,y as na,ao as xn,am as sa,V as ra,ae as oa,P as la,Q as aa,R as ha,O as ti,T as ii,E as Ji}from"./index-9e76ffee.js";import{f as Sn,B as ca}from"./Button-30a08c0b.js";import{C as ef,a as fa}from"./Copy-92242405.js";import{D as tf}from"./Download-e6704cf2.js";import{B as ua}from"./BlockLabel-9545c6da.js";import{E as nf}from"./Empty-8e3485c0.js";function sf(n){let e,t;return{c(){e=oo("svg"),t=oo("path"),se(t,"fill","currentColor"),se(t,"d","m31 16l-7 7l-1.41-1.41L28.17 16l-5.58-5.59L24 9l7 7zM1 16l7-7l1.41 1.41L3.83 16l5.58 5.59L8 23l-7-7zm11.42 9.484L17.64 6l1.932.517L14.352 26z"),se(e,"width","100%"),se(e,"height","100%"),se(e,"viewBox","0 0 32 32")},m(i,s){Ce(i,e,s),ei(e,t)},p:Ci,i:Ci,o:Ci,d(i){i&&Ae(e)}}}let Dr=class extends kt{constructor(e){super(),vt(this,e,null,sf,xt,{})}};class N{constructor(){}lineAt(e){if(e<0||e>this.length)throw new RangeError(`Invalid position ${e} in document of length ${this.length}`);return this.lineInner(e,!1,1,0)}line(e){if(e<1||e>this.lines)throw new RangeError(`Invalid line number ${e} in ${this.lines}-line document`);return this.lineInner(e,!0,1,0)}replace(e,t,i){let s=[];return this.decompose(0,e,s,2),i.length&&i.decompose(0,i.length,s,3),this.decompose(t,this.length,s,1),Je.from(s,this.length-(t-e)+i.length)}append(e){return this.replace(this.length,this.length,e)}slice(e,t=this.length){let i=[];return this.decompose(e,t,i,0),Je.from(i,t-e)}eq(e){if(e==this)return!0;if(e.length!=this.length||e.lines!=this.lines)return!1;let t=this.scanIdentical(e,1),i=this.length-this.scanIdentical(e,-1),s=new Ai(this),r=new Ai(e);for(let o=t,l=t;;){if(s.next(o),r.next(o),o=0,s.lineBreak!=r.lineBreak||s.done!=r.done||s.value!=r.value)return!1;if(l+=s.value.length,s.done||l>=i)return!0}}iter(e=1){return new Ai(this,e)}iterRange(e,t=this.length){return new da(this,e,t)}iterLines(e,t){let i;if(e==null)i=this.iter();else{t==null&&(t=this.lines+1);let s=this.line(e).from;i=this.iterRange(s,Math.max(s,t==this.lines+1?this.length:t<=1?0:this.line(t-1).to))}return new pa(i)}toString(){return this.sliceString(0)}toJSON(){let e=[];return this.flatten(e),e}static of(e){if(e.length==0)throw new RangeError("A document must have at least one line");return e.length==1&&!e[0]?N.empty:e.length<=32?new te(e):Je.from(te.split(e,[]))}}class te extends N{constructor(e,t=rf(e)){super(),this.text=e,this.length=t}get lines(){return this.text.length}get children(){return null}lineInner(e,t,i,s){for(let r=0;;r++){let o=this.text[r],l=s+o.length;if((t?i:l)>=e)return new of(s,l,i,o);s=l+1,i++}}decompose(e,t,i,s){let r=e<=0&&t>=this.length?this:new te(lo(this.text,e,t),Math.min(t,this.length)-Math.max(0,e));if(s&1){let o=i.pop(),l=mn(r.text,o.text.slice(),0,r.length);if(l.length<=32)i.push(new te(l,o.length+r.length));else{let a=l.length>>1;i.push(new te(l.slice(0,a)),new te(l.slice(a)))}}else i.push(r)}replace(e,t,i){if(!(i instanceof te))return super.replace(e,t,i);let s=mn(this.text,mn(i.text,lo(this.text,0,e)),t),r=this.length+i.length-(t-e);return s.length<=32?new te(s,r):Je.from(te.split(s,[]),r)}sliceString(e,t=this.length,i=` -`){let s="";for(let r=0,o=0;r<=t&&oe&&o&&(s+=i),er&&(s+=l.slice(Math.max(0,e-r),t-r)),r=a+1}return s}flatten(e){for(let t of this.text)e.push(t)}scanIdentical(){return 
0}static split(e,t){let i=[],s=-1;for(let r of e)i.push(r),s+=r.length+1,i.length==32&&(t.push(new te(i,s)),i=[],s=-1);return s>-1&&t.push(new te(i,s)),t}}class Je extends N{constructor(e,t){super(),this.children=e,this.length=t,this.lines=0;for(let i of e)this.lines+=i.lines}lineInner(e,t,i,s){for(let r=0;;r++){let o=this.children[r],l=s+o.length,a=i+o.lines-1;if((t?a:l)>=e)return o.lineInner(e,t,i,s);s=l+1,i=a+1}}decompose(e,t,i,s){for(let r=0,o=0;o<=t&&r=o){let h=s&((o<=e?1:0)|(a>=t?2:0));o>=e&&a<=t&&!h?i.push(l):l.decompose(e-o,t-o,i,h)}o=a+1}}replace(e,t,i){if(i.lines=r&&t<=l){let a=o.replace(e-r,t-r,i),h=this.lines-o.lines+a.lines;if(a.lines>5-1&&a.lines>h>>5+1){let c=this.children.slice();return c[s]=a,new Je(c,this.length-(t-e)+i.length)}return super.replace(r,l,a)}r=l+1}return super.replace(e,t,i)}sliceString(e,t=this.length,i=` -`){let s="";for(let r=0,o=0;re&&r&&(s+=i),eo&&(s+=l.sliceString(e-o,t-o,i)),o=a+1}return s}flatten(e){for(let t of this.children)t.flatten(e)}scanIdentical(e,t){if(!(e instanceof Je))return 0;let i=0,[s,r,o,l]=t>0?[0,0,this.children.length,e.children.length]:[this.children.length-1,e.children.length-1,-1,-1];for(;;s+=t,r+=t){if(s==o||r==l)return i;let a=this.children[s],h=e.children[r];if(a!=h)return i+a.scanIdentical(h,t);i+=a.length+1}}static from(e,t=e.reduce((i,s)=>i+s.length+1,-1)){let i=0;for(let d of e)i+=d.lines;if(i<32){let d=[];for(let p of e)p.flatten(d);return new te(d,t)}let s=Math.max(32,i>>5),r=s<<1,o=s>>1,l=[],a=0,h=-1,c=[];function f(d){let p;if(d.lines>r&&d instanceof Je)for(let g of d.children)f(g);else d.lines>o&&(a>o||!a)?(u(),l.push(d)):d instanceof te&&a&&(p=c[c.length-1])instanceof te&&d.lines+p.lines<=32?(a+=d.lines,h+=d.length+1,c[c.length-1]=new te(p.text.concat(d.text),p.length+1+d.length)):(a+d.lines>s&&u(),a+=d.lines,h+=d.length+1,c.push(d))}function u(){a!=0&&(l.push(c.length==1?c[0]:Je.from(c,h)),h=-1,a=c.length=0)}for(let d of e)f(d);return u(),l.length==1?l[0]:new Je(l,t)}}N.empty=new te([""],0);function rf(n){let e=-1;for(let t of n)e+=t.length+1;return e}function mn(n,e,t=0,i=1e9){for(let s=0,r=0,o=!0;r=t&&(a>i&&(l=l.slice(0,i-s)),s0?1:(e instanceof te?e.text.length:e.children.length)<<1]}nextInner(e,t){for(this.done=this.lineBreak=!1;;){let i=this.nodes.length-1,s=this.nodes[i],r=this.offsets[i],o=r>>1,l=s instanceof te?s.text.length:s.children.length;if(o==(t>0?l:0)){if(i==0)return this.done=!0,this.value="",this;t>0&&this.offsets[i-1]++,this.nodes.pop(),this.offsets.pop()}else if((r&1)==(t>0?0:1)){if(this.offsets[i]+=t,e==0)return this.lineBreak=!0,this.value=` -`,this;e--}else if(s instanceof te){let a=s.text[o+(t<0?-1:0)];if(this.offsets[i]+=t,a.length>Math.max(0,e))return this.value=e==0?a:t>0?a.slice(e):a.slice(0,a.length-e),this;e-=a.length}else{let a=s.children[o+(t<0?-1:0)];e>a.length?(e-=a.length,this.offsets[i]+=t):(t<0&&this.offsets[i]--,this.nodes.push(a),this.offsets.push(t>0?1:(a instanceof te?a.text.length:a.children.length)<<1))}}}next(e=0){return e<0&&(this.nextInner(-e,-this.dir),e=this.value.length),this.nextInner(e,this.dir)}}class da{constructor(e,t,i){this.value="",this.done=!1,this.cursor=new Ai(e,t>i?-1:1),this.pos=t>i?e.length:0,this.from=Math.min(t,i),this.to=Math.max(t,i)}nextInner(e,t){if(t<0?this.pos<=this.from:this.pos>=this.to)return this.value="",this.done=!0,this;e+=Math.max(0,t<0?this.pos-this.to:this.from-this.pos);let i=t<0?this.pos-this.from:this.to-this.pos;e>i&&(e=i),i-=e;let{value:s}=this.cursor.next(e);return 
this.pos+=(s.length+e)*t,this.value=s.length<=i?s:t<0?s.slice(s.length-i):s.slice(0,i),this.done=!this.value,this}next(e=0){return e<0?e=Math.max(e,this.from-this.pos):e>0&&(e=Math.min(e,this.to-this.pos)),this.nextInner(e,this.cursor.dir)}get lineBreak(){return this.cursor.lineBreak&&this.value!=""}}class pa{constructor(e){this.inner=e,this.afterBreak=!0,this.value="",this.done=!1}next(e=0){let{done:t,lineBreak:i,value:s}=this.inner.next(e);return t?(this.done=!0,this.value=""):i?this.afterBreak?this.value="":(this.afterBreak=!0,this.next()):(this.value=s,this.afterBreak=!1),this}get lineBreak(){return!1}}typeof Symbol<"u"&&(N.prototype[Symbol.iterator]=function(){return this.iter()},Ai.prototype[Symbol.iterator]=da.prototype[Symbol.iterator]=pa.prototype[Symbol.iterator]=function(){return this});class of{constructor(e,t,i,s){this.from=e,this.to=t,this.number=i,this.text=s}get length(){return this.to-this.from}}let Xt="lc,34,7n,7,7b,19,,,,2,,2,,,20,b,1c,l,g,,2t,7,2,6,2,2,,4,z,,u,r,2j,b,1m,9,9,,o,4,,9,,3,,5,17,3,3b,f,,w,1j,,,,4,8,4,,3,7,a,2,t,,1m,,,,2,4,8,,9,,a,2,q,,2,2,1l,,4,2,4,2,2,3,3,,u,2,3,,b,2,1l,,4,5,,2,4,,k,2,m,6,,,1m,,,2,,4,8,,7,3,a,2,u,,1n,,,,c,,9,,14,,3,,1l,3,5,3,,4,7,2,b,2,t,,1m,,2,,2,,3,,5,2,7,2,b,2,s,2,1l,2,,,2,4,8,,9,,a,2,t,,20,,4,,2,3,,,8,,29,,2,7,c,8,2q,,2,9,b,6,22,2,r,,,,,,1j,e,,5,,2,5,b,,10,9,,2u,4,,6,,2,2,2,p,2,4,3,g,4,d,,2,2,6,,f,,jj,3,qa,3,t,3,t,2,u,2,1s,2,,7,8,,2,b,9,,19,3,3b,2,y,,3a,3,4,2,9,,6,3,63,2,2,,1m,,,7,,,,,2,8,6,a,2,,1c,h,1r,4,1c,7,,,5,,14,9,c,2,w,4,2,2,,3,1k,,,2,3,,,3,1m,8,2,2,48,3,,d,,7,4,,6,,3,2,5i,1m,,5,ek,,5f,x,2da,3,3x,,2o,w,fe,6,2x,2,n9w,4,,a,w,2,28,2,7k,,3,,4,,p,2,5,,47,2,q,i,d,,12,8,p,b,1a,3,1c,,2,4,2,2,13,,1v,6,2,2,2,2,c,,8,,1b,,1f,,,3,2,2,5,2,,,16,2,8,,6m,,2,,4,,fn4,,kh,g,g,g,a6,2,gt,,6a,,45,5,1ae,3,,2,5,4,14,3,4,,4l,2,fx,4,ar,2,49,b,4w,,1i,f,1k,3,1d,4,2,2,1x,3,10,5,,8,1q,,c,2,1g,9,a,4,2,,2n,3,2,,,2,6,,4g,,3,8,l,2,1l,2,,,,,m,,e,7,3,5,5f,8,2,3,,,n,,29,,2,6,,,2,,,2,,2,6j,,2,4,6,2,,2,r,2,2d,8,2,,,2,2y,,,,2,6,,,2t,3,2,4,,5,77,9,,2,6t,,a,2,,,4,,40,4,2,2,4,,w,a,14,6,2,4,8,,9,6,2,3,1a,d,,2,ba,7,,6,,,2a,m,2,7,,2,,2,3e,6,3,,,2,,7,,,20,2,3,,,,9n,2,f0b,5,1n,7,t4,,1r,4,29,,f5k,2,43q,,,3,4,5,8,8,2,7,u,4,44,3,1iz,1j,4,1e,8,,e,,m,5,,f,11s,7,,h,2,7,,2,,5,79,7,c5,4,15s,7,31,7,240,5,gx7k,2o,3k,6o".split(",").map(n=>n?parseInt(n,36):1);for(let n=1;nn)return Xt[e-1]<=n;return!1}function ao(n){return n>=127462&&n<=127487}const ho=8205;function Oe(n,e,t=!0,i=!0){return(t?ma:af)(n,e,i)}function ma(n,e,t){if(e==n.length)return e;e&&ga(n.charCodeAt(e))&&ba(n.charCodeAt(e-1))&&e--;let i=ge(n,e);for(e+=Ee(i);e=0&&ao(ge(n,o));)r++,o-=2;if(r%2==0)break;e+=2}else break}return e}function af(n,e,t){for(;e>0;){let i=ma(n,e-2,t);if(i=56320&&n<57344}function ba(n){return n>=55296&&n<56320}function ge(n,e){let t=n.charCodeAt(e);if(!ba(t)||e+1==n.length)return t;let i=n.charCodeAt(e+1);return ga(i)?(t-55296<<10)+(i-56320)+65536:t}function ya(n){return n<=65535?String.fromCharCode(n):(n-=65536,String.fromCharCode((n>>10)+55296,(n&1023)+56320))}function Ee(n){return n<65536?1:2}const Ls=/\r\n?|\n/;var de=function(n){return n[n.Simple=0]="Simple",n[n.TrackDel=1]="TrackDel",n[n.TrackBefore=2]="TrackBefore",n[n.TrackAfter=3]="TrackAfter",n}(de||(de={}));class Qe{constructor(e){this.sections=e}get length(){let e=0;for(let t=0;te)return r+(e-s);r+=l}else{if(i!=de.Simple&&h>=e&&(i==de.TrackDel&&se||i==de.TrackBefore&&se))return null;if(h>e||h==e&&t<0&&!l)return e==s||t<0?r:r+a;r+=a}s=h}if(e>s)throw new RangeError(`Position ${e} is out of range for changeset of length ${s}`);return 
r}touchesRange(e,t=e){for(let i=0,s=0;i=0&&s<=t&&l>=e)return st?"cover":!0;s=l}return!1}toString(){let e="";for(let t=0;t=0?":"+s:"")}return e}toJSON(){return this.sections}static fromJSON(e){if(!Array.isArray(e)||e.length%2||e.some(t=>typeof t!="number"))throw new RangeError("Invalid JSON representation of ChangeDesc");return new Qe(e)}static create(e){return new Qe(e)}}class ne extends Qe{constructor(e,t){super(e),this.inserted=t}apply(e){if(this.length!=e.length)throw new RangeError("Applying change set to a document with the wrong length");return Is(this,(t,i,s,r,o)=>e=e.replace(s,s+(i-t),o),!1),e}mapDesc(e,t=!1){return _s(this,e,t,!0)}invert(e){let t=this.sections.slice(),i=[];for(let s=0,r=0;s=0){t[s]=l,t[s+1]=o;let a=s>>1;for(;i.length0&&ht(i,t,r.text),r.forward(c),l+=c}let h=e[o++];for(;l>1].toJSON()))}return e}static of(e,t,i){let s=[],r=[],o=0,l=null;function a(c=!1){if(!c&&!s.length)return;ou||f<0||u>t)throw new RangeError(`Invalid change range ${f} to ${u} (in doc of length ${t})`);let p=d?typeof d=="string"?N.of(d.split(i||Ls)):d:N.empty,g=p.length;if(f==u&&g==0)return;fo&&me(s,f-o,-1),me(s,u-f,g),ht(r,s,p),o=u}}return h(e),a(!l),l}static empty(e){return new ne(e?[e,-1]:[],[])}static fromJSON(e){if(!Array.isArray(e))throw new RangeError("Invalid JSON representation of ChangeSet");let t=[],i=[];for(let s=0;sl&&typeof o!="string"))throw new RangeError("Invalid JSON representation of ChangeSet");if(r.length==1)t.push(r[0],0);else{for(;i.length=0&&t<=0&&t==n[s+1]?n[s]+=e:e==0&&n[s]==0?n[s+1]+=t:i?(n[s]+=e,n[s+1]+=t):n.push(e,t)}function ht(n,e,t){if(t.length==0)return;let i=e.length-2>>1;if(i>1])),!(t||o==n.sections.length||n.sections[o+1]<0);)l=n.sections[o++],a=n.sections[o++];e(s,h,r,c,f),s=h,r=c}}}function _s(n,e,t,i=!1){let s=[],r=i?[]:null,o=new Bi(n),l=new Bi(e);for(let a=-1;;)if(o.ins==-1&&l.ins==-1){let h=Math.min(o.len,l.len);me(s,h,-1),o.forward(h),l.forward(h)}else if(l.ins>=0&&(o.ins<0||a==o.i||o.off==0&&(l.len=0&&a=0){let h=0,c=o.len;for(;c;)if(l.ins==-1){let f=Math.min(c,l.len);h+=f,c-=f,l.forward(f)}else if(l.ins==0&&l.lena||o.ins>=0&&o.len>a)&&(l||i.length>h),r.forward2(a),o.forward(a)}}}}class Bi{constructor(e){this.set=e,this.i=0,this.next()}next(){let{sections:e}=this.set;this.i>1;return t>=e.length?N.empty:e[t]}textBit(e){let{inserted:t}=this.set,i=this.i-2>>1;return i>=t.length&&!e?N.empty:t[i].slice(this.off,e==null?void 0:this.off+e)}forward(e){e==this.len?this.next():(this.len-=e,this.off+=e)}forward2(e){this.ins==-1?this.forward(e):e==this.ins?this.next():(this.ins-=e,this.off+=e)}}class Ot{constructor(e,t,i){this.from=e,this.to=t,this.flags=i}get anchor(){return this.flags&16?this.to:this.from}get head(){return this.flags&16?this.from:this.to}get empty(){return this.from==this.to}get assoc(){return this.flags&4?-1:this.flags&8?1:0}get bidiLevel(){let e=this.flags&3;return e==3?null:e}get goalColumn(){let e=this.flags>>5;return e==33554431?void 0:e}map(e,t=-1){let i,s;return this.empty?i=s=e.mapPos(this.from,t):(i=e.mapPos(this.from,1),s=e.mapPos(this.to,-1)),i==this.from&&s==this.to?this:new Ot(i,s,this.flags)}extend(e,t=e){if(e<=this.anchor&&t>=this.anchor)return k.range(e,t);let i=Math.abs(e-this.anchor)>Math.abs(t-this.anchor)?e:t;return k.range(this.anchor,i)}eq(e){return this.anchor==e.anchor&&this.head==e.head}toJSON(){return{anchor:this.anchor,head:this.head}}static fromJSON(e){if(!e||typeof e.anchor!="number"||typeof e.head!="number")throw new RangeError("Invalid JSON representation for SelectionRange");return k.range(e.anchor,e.head)}static 
create(e,t,i){return new Ot(e,t,i)}}class k{constructor(e,t){this.ranges=e,this.mainIndex=t}map(e,t=-1){return e.empty?this:k.create(this.ranges.map(i=>i.map(e,t)),this.mainIndex)}eq(e){if(this.ranges.length!=e.ranges.length||this.mainIndex!=e.mainIndex)return!1;for(let t=0;te.toJSON()),main:this.mainIndex}}static fromJSON(e){if(!e||!Array.isArray(e.ranges)||typeof e.main!="number"||e.main>=e.ranges.length)throw new RangeError("Invalid JSON representation for EditorSelection");return new k(e.ranges.map(t=>Ot.fromJSON(t)),e.main)}static single(e,t=e){return new k([k.range(e,t)],0)}static create(e,t=0){if(e.length==0)throw new RangeError("A selection needs at least one range");for(let i=0,s=0;se?4:0))}static normalized(e,t=0){let i=e[t];e.sort((s,r)=>s.from-r.from),t=e.indexOf(i);for(let s=1;sr.head?k.range(a,l):k.range(l,a))}}return new k(e,t)}}function ka(n,e){for(let t of n.ranges)if(t.to>e)throw new RangeError("Selection points outside of document")}let Tr=0;class O{constructor(e,t,i,s,r){this.combine=e,this.compareInput=t,this.compare=i,this.isStatic=s,this.id=Tr++,this.default=e([]),this.extensions=typeof r=="function"?r(this):r}static define(e={}){return new O(e.combine||(t=>t),e.compareInput||((t,i)=>t===i),e.compare||(e.combine?(t,i)=>t===i:Or),!!e.static,e.enables)}of(e){return new gn([],this,0,e)}compute(e,t){if(this.isStatic)throw new Error("Can't compute a static facet");return new gn(e,this,1,t)}computeN(e,t){if(this.isStatic)throw new Error("Can't compute a static facet");return new gn(e,this,2,t)}from(e,t){return t||(t=i=>i),this.compute([e],i=>t(i.field(e)))}}function Or(n,e){return n==e||n.length==e.length&&n.every((t,i)=>t===e[i])}class gn{constructor(e,t,i,s){this.dependencies=e,this.facet=t,this.type=i,this.value=s,this.id=Tr++}dynamicSlot(e){var t;let i=this.value,s=this.facet.compareInput,r=this.id,o=e[r]>>1,l=this.type==2,a=!1,h=!1,c=[];for(let f of this.dependencies)f=="doc"?a=!0:f=="selection"?h=!0:((t=e[f.id])!==null&&t!==void 0?t:1)&1||c.push(e[f.id]);return{create(f){return f.values[o]=i(f),1},update(f,u){if(a&&u.docChanged||h&&(u.docChanged||u.selection)||Ns(f,c)){let d=i(f);if(l?!co(d,f.values[o],s):!s(d,f.values[o]))return f.values[o]=d,1}return 0},reconfigure:(f,u)=>{let d=i(f),p=u.config.address[r];if(p!=null){let g=An(u,p);if(this.dependencies.every(b=>b instanceof O?u.facet(b)===f.facet(b):b instanceof Me?u.field(b,!1)==f.field(b,!1):!0)||(l?co(d,g,s):s(d,g)))return f.values[o]=g,0}return f.values[o]=d,1}}}}function co(n,e,t){if(n.length!=e.length)return!1;for(let i=0;in[a.id]),s=t.map(a=>a.type),r=i.filter(a=>!(a&1)),o=n[e.id]>>1;function l(a){let h=[];for(let c=0;ci===s),e);return e.provide&&(t.provides=e.provide(t)),t}create(e){let t=e.facet(fo).find(i=>i.field==this);return(t?.create||this.createF)(e)}slot(e){let t=e[this.id]>>1;return{create:i=>(i.values[t]=this.create(i),1),update:(i,s)=>{let r=i.values[t],o=this.updateF(r,s);return this.compareF(r,o)?0:(i.values[t]=o,1)},reconfigure:(i,s)=>s.config.address[this.id]!=null?(i.values[t]=s.field(this),0):(i.values[t]=this.create(i),1)}}init(e){return[this,fo.of({field:this,create:e})]}get extension(){return this}}const Dt={lowest:4,low:3,default:2,high:1,highest:0};function di(n){return e=>new va(e,n)}const Wi={highest:di(Dt.highest),high:di(Dt.high),default:di(Dt.default),low:di(Dt.low),lowest:di(Dt.lowest)};class va{constructor(e,t){this.inner=e,this.prec=t}}class qn{of(e){return new Vs(this,e)}reconfigure(e){return qn.reconfigure.of({compartment:this,extension:e})}get(e){return 
e.config.compartments.get(this)}}class Vs{constructor(e,t){this.compartment=e,this.inner=t}}class Cn{constructor(e,t,i,s,r,o){for(this.base=e,this.compartments=t,this.dynamicSlots=i,this.address=s,this.staticValues=r,this.facets=o,this.statusTemplate=[];this.statusTemplate.length>1]}static resolve(e,t,i){let s=[],r=Object.create(null),o=new Map;for(let u of cf(e,t,o))u instanceof Me?s.push(u):(r[u.facet.id]||(r[u.facet.id]=[])).push(u);let l=Object.create(null),a=[],h=[];for(let u of s)l[u.id]=h.length<<1,h.push(d=>u.slot(d));let c=i?.config.facets;for(let u in r){let d=r[u],p=d[0].facet,g=c&&c[u]||[];if(d.every(b=>b.type==0))if(l[p.id]=a.length<<1|1,Or(g,d))a.push(i.facet(p));else{let b=p.combine(d.map(w=>w.value));a.push(i&&p.compare(b,i.facet(p))?i.facet(p):b)}else{for(let b of d)b.type==0?(l[b.id]=a.length<<1|1,a.push(b.value)):(l[b.id]=h.length<<1,h.push(w=>b.dynamicSlot(w)));l[p.id]=h.length<<1,h.push(b=>hf(b,p,d))}}let f=h.map(u=>u(l));return new Cn(e,o,f,l,a,r)}}function cf(n,e,t){let i=[[],[],[],[],[]],s=new Map;function r(o,l){let a=s.get(o);if(a!=null){if(a<=l)return;let h=i[a].indexOf(o);h>-1&&i[a].splice(h,1),o instanceof Vs&&t.delete(o.compartment)}if(s.set(o,l),Array.isArray(o))for(let h of o)r(h,l);else if(o instanceof Vs){if(t.has(o.compartment))throw new RangeError("Duplicate use of compartment in extensions");let h=e.get(o.compartment)||o.inner;t.set(o.compartment,h),r(h,l)}else if(o instanceof va)r(o.inner,o.prec);else if(o instanceof Me)i[l].push(o),o.provides&&r(o.provides,l);else if(o instanceof gn)i[l].push(o),o.facet.extensions&&r(o.facet.extensions,Dt.default);else{let h=o.extension;if(!h)throw new Error(`Unrecognized extension value in extension set (${o}). This sometimes happens because multiple instances of @codemirror/state are loaded, breaking instanceof checks.`);r(h,l)}}return r(n,Dt.default),i.reduce((o,l)=>o.concat(l))}function Mi(n,e){if(e&1)return 2;let t=e>>1,i=n.status[t];if(i==4)throw new Error("Cyclic dependency between fields and/or facets");if(i&2)return i;n.status[t]=4;let s=n.computeSlot(n,n.config.dynamicSlots[t]);return n.status[t]=2|s}function An(n,e){return e&1?n.config.staticValues[e>>1]:n.values[e>>1]}const xa=O.define(),Sa=O.define({combine:n=>n.some(e=>e),static:!0}),Ca=O.define({combine:n=>n.length?n[0]:void 0,static:!0}),Aa=O.define(),Ma=O.define(),Da=O.define(),Ta=O.define({combine:n=>n.length?n[0]:!1});class Ht{constructor(e,t){this.type=e,this.value=t}static define(){return new ff}}class ff{of(e){return new Ht(this,e)}}class uf{constructor(e){this.map=e}of(e){return new R(this,e)}}class R{constructor(e,t){this.type=e,this.value=t}map(e){let t=this.type.map(this.value,e);return t===void 0?void 0:t==this.value?this:new R(this.type,t)}is(e){return this.type==e}static define(e={}){return new uf(e.map||(t=>t))}static mapEffects(e,t){if(!e.length)return e;let i=[];for(let s of e){let r=s.map(t);r&&i.push(r)}return i}}R.reconfigure=R.define();R.appendConfig=R.define();class re{constructor(e,t,i,s,r,o){this.startState=e,this.changes=t,this.selection=i,this.effects=s,this.annotations=r,this.scrollIntoView=o,this._doc=null,this._state=null,i&&ka(i,t.newLength),r.some(l=>l.type==re.time)||(this.annotations=r.concat(re.time.of(Date.now())))}static create(e,t,i,s,r,o){return new re(e,t,i,s,r,o)}get newDoc(){return this._doc||(this._doc=this.changes.apply(this.startState.doc))}get newSelection(){return this.selection||this.startState.selection.map(this.changes)}get state(){return 
this._state||this.startState.applyTransaction(this),this._state}annotation(e){for(let t of this.annotations)if(t.type==e)return t.value}get docChanged(){return!this.changes.empty}get reconfigured(){return this.startState.config!=this.state.config}isUserEvent(e){let t=this.annotation(re.userEvent);return!!(t&&(t==e||t.length>e.length&&t.slice(0,e.length)==e&&t[e.length]=="."))}}re.time=Ht.define();re.userEvent=Ht.define();re.addToHistory=Ht.define();re.remote=Ht.define();function df(n,e){let t=[];for(let i=0,s=0;;){let r,o;if(i=n[i]))r=n[i++],o=n[i++];else if(s=0;s--){let r=i[s](n);r instanceof re?n=r:Array.isArray(r)&&r.length==1&&r[0]instanceof re?n=r[0]:n=Ba(e,Zt(r),!1)}return n}function mf(n){let e=n.startState,t=e.facet(Da),i=n;for(let s=t.length-1;s>=0;s--){let r=t[s](n);r&&Object.keys(r).length&&(i=Oa(i,Fs(e,r,n.changes.newLength),!0))}return i==n?n:re.create(e,n.changes,n.selection,i.effects,i.annotations,i.scrollIntoView)}const gf=[];function Zt(n){return n==null?gf:Array.isArray(n)?n:[n]}var Re=function(n){return n[n.Word=0]="Word",n[n.Space=1]="Space",n[n.Other=2]="Other",n}(Re||(Re={}));const bf=/[\u00df\u0587\u0590-\u05f4\u0600-\u06ff\u3040-\u309f\u30a0-\u30ff\u3400-\u4db5\u4e00-\u9fcc\uac00-\ud7af]/;let Hs;try{Hs=new RegExp("[\\p{Alphabetic}\\p{Number}_]","u")}catch{}function yf(n){if(Hs)return Hs.test(n);for(let e=0;e"€"&&(t.toUpperCase()!=t.toLowerCase()||bf.test(t)))return!0}return!1}function wf(n){return e=>{if(!/\S/.test(e))return Re.Space;if(yf(e))return Re.Word;for(let t=0;t-1)return Re.Word;return Re.Other}}class _{constructor(e,t,i,s,r,o){this.config=e,this.doc=t,this.selection=i,this.values=s,this.status=e.statusTemplate.slice(),this.computeSlot=r,o&&(o._state=this);for(let l=0;ls.set(a,l)),t=null),s.set(o.value.compartment,o.value.extension)):o.is(R.reconfigure)?(t=null,i=o.value):o.is(R.appendConfig)&&(t=null,i=Zt(i).concat(o.value));let r;t?r=e.startState.values.slice():(t=Cn.resolve(i,s,this),r=new _(t,this.doc,this.selection,t.dynamicSlots.map(()=>null),(l,a)=>a.reconfigure(l,this),null).values),new _(t,e.newDoc,e.newSelection,r,(o,l)=>l.update(o,e),e)}replaceSelection(e){return typeof e=="string"&&(e=this.toText(e)),this.changeByRange(t=>({changes:{from:t.from,to:t.to,insert:e},range:k.cursor(t.from+e.length)}))}changeByRange(e){let t=this.selection,i=e(t.ranges[0]),s=this.changes(i.changes),r=[i.range],o=Zt(i.effects);for(let l=1;lo.spec.fromJSON(l,a)))}}return _.create({doc:e.doc,selection:k.fromJSON(e.selection),extensions:t.extensions?s.concat([t.extensions]):s})}static create(e={}){let t=Cn.resolve(e.extensions||[],new Map),i=e.doc instanceof N?e.doc:N.of((e.doc||"").split(t.staticFacet(_.lineSeparator)||Ls)),s=e.selection?e.selection instanceof k?e.selection:k.single(e.selection.anchor,e.selection.head):k.single(0);return ka(s,i.length),t.staticFacet(Sa)||(s=s.asSingle()),new _(t,i,s,t.dynamicSlots.map(()=>null),(r,o)=>o.create(r),null)}get tabSize(){return this.facet(_.tabSize)}get lineBreak(){return this.facet(_.lineSeparator)||` -`}get readOnly(){return this.facet(Ta)}phrase(e,...t){for(let i of this.facet(_.phrases))if(Object.prototype.hasOwnProperty.call(i,e)){e=i[e];break}return t.length&&(e=e.replace(/\$(\$|\d*)/g,(i,s)=>{if(s=="$")return"$";let r=+(s||1);return!r||r>t.length?i:t[r-1]})),e}languageDataAt(e,t,i=-1){let s=[];for(let r of this.facet(xa))for(let o of r(this,t,i))Object.prototype.hasOwnProperty.call(o,e)&&s.push(o[e]);return s}charCategorizer(e){return 
wf(this.languageDataAt("wordChars",e).join(""))}wordAt(e){let{text:t,from:i,length:s}=this.doc.lineAt(e),r=this.charCategorizer(e),o=e-i,l=e-i;for(;o>0;){let a=Oe(t,o,!1);if(r(t.slice(a,o))!=Re.Word)break;o=a}for(;ln.length?n[0]:4});_.lineSeparator=Ca;_.readOnly=Ta;_.phrases=O.define({compare(n,e){let t=Object.keys(n),i=Object.keys(e);return t.length==i.length&&t.every(s=>n[s]==e[s])}});_.languageData=xa;_.changeFilter=Aa;_.transactionFilter=Ma;_.transactionExtender=Da;qn.reconfigure=R.define();function Wt(n,e,t={}){let i={};for(let s of n)for(let r of Object.keys(s)){let o=s[r],l=i[r];if(l===void 0)i[r]=o;else if(!(l===o||o===void 0))if(Object.hasOwnProperty.call(t,r))i[r]=t[r](l,o);else throw new Error("Config merge conflict for field "+r)}for(let s in e)i[s]===void 0&&(i[s]=e[s]);return i}class Lt{eq(e){return this==e}range(e,t=e){return Ws.create(e,t,this)}}Lt.prototype.startSide=Lt.prototype.endSide=0;Lt.prototype.point=!1;Lt.prototype.mapMode=de.TrackDel;let Ws=class Pa{constructor(e,t,i){this.from=e,this.to=t,this.value=i}static create(e,t,i){return new Pa(e,t,i)}};function zs(n,e){return n.from-e.from||n.value.startSide-e.value.startSide}class Br{constructor(e,t,i,s){this.from=e,this.to=t,this.value=i,this.maxPoint=s}get length(){return this.to[this.to.length-1]}findIndex(e,t,i,s=0){let r=i?this.to:this.from;for(let o=s,l=r.length;;){if(o==l)return o;let a=o+l>>1,h=r[a]-e||(i?this.value[a].endSide:this.value[a].startSide)-t;if(a==o)return h>=0?o:l;h>=0?l=a:o=a+1}}between(e,t,i,s){for(let r=this.findIndex(t,-1e9,!0),o=this.findIndex(i,1e9,!1,r);rd||u==d&&h.startSide>0&&h.endSide<=0)continue;(d-u||h.endSide-h.startSide)<0||(o<0&&(o=u),h.point&&(l=Math.max(l,d-u)),i.push(h),s.push(u-o),r.push(d-o))}return{mapped:i.length?new Br(s,r,i,l):null,pos:o}}}class H{constructor(e,t,i,s){this.chunkPos=e,this.chunk=t,this.nextLayer=i,this.maxPoint=s}static create(e,t,i,s){return new H(e,t,i,s)}get length(){let e=this.chunk.length-1;return e<0?0:Math.max(this.chunkEnd(e),this.nextLayer.length)}get size(){if(this.isEmpty)return 0;let e=this.nextLayer.size;for(let t of this.chunk)e+=t.value.length;return e}chunkEnd(e){return this.chunkPos[e]+this.chunk[e].length}update(e){let{add:t=[],sort:i=!1,filterFrom:s=0,filterTo:r=this.length}=e,o=e.filter;if(t.length==0&&!o)return this;if(i&&(t=t.slice().sort(zs)),this.isEmpty)return t.length?H.of(t):this;let l=new Ea(this,null,-1).goto(0),a=0,h=[],c=new It;for(;l.value||a=0){let f=t[a++];c.addInner(f.from,f.to,f.value)||h.push(f)}else l.rangeIndex==1&&l.chunkIndexthis.chunkEnd(l.chunkIndex)||rl.to||r=r&&e<=r+o.length&&o.between(r,e-r,t-r,i)===!1)return}this.nextLayer.between(e,t,i)}}iter(e=0){return Pi.from([this]).goto(e)}get isEmpty(){return this.nextLayer==this}static iter(e,t=0){return Pi.from(e).goto(t)}static compare(e,t,i,s,r=-1){let o=e.filter(f=>f.maxPoint>0||!f.isEmpty&&f.maxPoint>=r),l=t.filter(f=>f.maxPoint>0||!f.isEmpty&&f.maxPoint>=r),a=uo(o,l,i),h=new pi(o,a,r),c=new pi(l,a,r);i.iterGaps((f,u,d)=>po(h,f,c,u,d,s)),i.empty&&i.length==0&&po(h,0,c,0,0,s)}static eq(e,t,i=0,s){s==null&&(s=1e9);let r=e.filter(c=>!c.isEmpty&&t.indexOf(c)<0),o=t.filter(c=>!c.isEmpty&&e.indexOf(c)<0);if(r.length!=o.length)return!1;if(!r.length)return!0;let l=uo(r,o),a=new pi(r,l,0).goto(i),h=new pi(o,l,0).goto(i);for(;;){if(a.to!=h.to||!qs(a.active,h.active)||a.point&&(!h.point||!a.point.eq(h.point)))return!1;if(a.to>s)return!0;a.next(),h.next()}}static spans(e,t,i,s,r=-1){let o=new pi(e,null,r).goto(t),l=t,a=o.openStart;for(;;){let 
h=Math.min(o.to,i);if(o.point?(s.point(l,h,o.point,o.activeForPoint(o.to),a,o.pointRank),a=o.openEnd(h)+(o.to>h?1:0)):h>l&&(s.span(l,h,o.active,a),a=o.openEnd(h)),o.to>i)break;l=o.to,o.next()}return a}static of(e,t=!1){let i=new It;for(let s of e instanceof Ws?[e]:t?kf(e):e)i.add(s.from,s.to,s.value);return i.finish()}}H.empty=new H([],[],null,-1);function kf(n){if(n.length>1)for(let e=n[0],t=1;t0)return n.slice().sort(zs);e=i}return n}H.empty.nextLayer=H.empty;class It{constructor(){this.chunks=[],this.chunkPos=[],this.chunkStart=-1,this.last=null,this.lastFrom=-1e9,this.lastTo=-1e9,this.from=[],this.to=[],this.value=[],this.maxPoint=-1,this.setMaxPoint=-1,this.nextLayer=null}finishChunk(e){this.chunks.push(new Br(this.from,this.to,this.value,this.maxPoint)),this.chunkPos.push(this.chunkStart),this.chunkStart=-1,this.setMaxPoint=Math.max(this.setMaxPoint,this.maxPoint),this.maxPoint=-1,e&&(this.from=[],this.to=[],this.value=[])}add(e,t,i){this.addInner(e,t,i)||(this.nextLayer||(this.nextLayer=new It)).add(e,t,i)}addInner(e,t,i){let s=e-this.lastTo||i.startSide-this.last.endSide;if(s<=0&&(e-this.lastFrom||i.startSide-this.last.startSide)<0)throw new Error("Ranges must be added sorted by `from` position and `startSide`");return s<0?!1:(this.from.length==250&&this.finishChunk(!0),this.chunkStart<0&&(this.chunkStart=e),this.from.push(e-this.chunkStart),this.to.push(t-this.chunkStart),this.last=i,this.lastFrom=e,this.lastTo=t,this.value.push(i),i.point&&(this.maxPoint=Math.max(this.maxPoint,t-e)),!0)}addChunk(e,t){if((e-this.lastTo||t.value[0].startSide-this.last.endSide)<0)return!1;this.from.length&&this.finishChunk(!0),this.setMaxPoint=Math.max(this.setMaxPoint,t.maxPoint),this.chunks.push(t),this.chunkPos.push(e);let i=t.value.length-1;return this.last=t.value[i],this.lastFrom=t.from[i]+e,this.lastTo=t.to[i]+e,!0}finish(){return this.finishInner(H.empty)}finishInner(e){if(this.from.length&&this.finishChunk(!1),this.chunks.length==0)return e;let t=H.create(this.chunkPos,this.chunks,this.nextLayer?this.nextLayer.finishInner(e):e,this.setMaxPoint);return this.from=null,t}}function uo(n,e,t){let i=new Map;for(let r of n)for(let o=0;o=this.minPoint)break}}setRangeIndex(e){if(e==this.layer.chunk[this.chunkIndex].value.length){if(this.chunkIndex++,this.skip)for(;this.chunkIndex=i&&s.push(new Ea(o,t,i,r));return s.length==1?s[0]:new Pi(s)}get startSide(){return this.value?this.value.startSide:0}goto(e,t=-1e9){for(let i of this.heap)i.goto(e,t);for(let i=this.heap.length>>1;i>=0;i--)ss(this.heap,i);return this.next(),this}forward(e,t){for(let i of this.heap)i.forward(e,t);for(let i=this.heap.length>>1;i>=0;i--)ss(this.heap,i);(this.to-e||this.value.endSide-t)<0&&this.next()}next(){if(this.heap.length==0)this.from=this.to=1e9,this.value=null,this.rank=-1;else{let e=this.heap[0];this.from=e.from,this.to=e.to,this.value=e.value,this.rank=e.rank,e.value&&e.next(),ss(this.heap,0)}}}function ss(n,e){for(let t=n[e];;){let i=(e<<1)+1;if(i>=n.length)break;let s=n[i];if(i+1=0&&(s=n[i+1],i++),t.compare(s)<0)break;n[i]=t,n[e]=s,e=i}}class pi{constructor(e,t,i){this.minPoint=i,this.active=[],this.activeTo=[],this.activeRank=[],this.minActive=-1,this.point=null,this.pointFrom=0,this.pointRank=0,this.to=-1e9,this.endSide=0,this.openStart=-1,this.cursor=Pi.from(e,t,i)}goto(e,t=-1e9){return 
this.cursor.goto(e,t),this.active.length=this.activeTo.length=this.activeRank.length=0,this.minActive=-1,this.to=e,this.endSide=t,this.openStart=-1,this.next(),this}forward(e,t){for(;this.minActive>-1&&(this.activeTo[this.minActive]-e||this.active[this.minActive].endSide-t)<0;)this.removeActive(this.minActive);this.cursor.forward(e,t)}removeActive(e){Yi(this.active,e),Yi(this.activeTo,e),Yi(this.activeRank,e),this.minActive=mo(this.active,this.activeTo)}addActive(e){let t=0,{value:i,to:s,rank:r}=this.cursor;for(;t-1&&(this.activeTo[r]-this.cursor.from||this.active[r].endSide-this.cursor.startSide)<0){if(this.activeTo[r]>e){this.to=this.activeTo[r],this.endSide=this.active[r].endSide;break}this.removeActive(r),i&&Yi(i,r)}else if(this.cursor.value)if(this.cursor.from>e){this.to=this.cursor.from,this.endSide=this.cursor.startSide;break}else{let o=this.cursor.value;if(!o.point)this.addActive(i),this.cursor.frome&&s++,this.cursor.next();else if(t&&this.cursor.to==this.to&&this.cursor.from=0&&!(this.activeRank[i]e||this.activeTo[i]==e&&this.active[i].endSide>=this.point.endSide)&&t.push(this.active[i]);return t.reverse()}openEnd(e){let t=0;for(let i=this.activeTo.length-1;i>=0&&this.activeTo[i]>e;i--)t++;return t}}function po(n,e,t,i,s,r){n.goto(e),t.goto(i);let o=i+s,l=i,a=i-e;for(;;){let h=n.to+a-t.to||n.endSide-t.endSide,c=h<0?n.to+a:t.to,f=Math.min(c,o);if(n.point||t.point?n.point&&t.point&&(n.point==t.point||n.point.eq(t.point))&&qs(n.activeForPoint(n.to+a),t.activeForPoint(t.to))||r.comparePoint(l,f,n.point,t.point):f>l&&!qs(n.active,t.active)&&r.compareRange(l,f,n.active,t.active),c>o)break;l=c,h<=0&&n.next(),h>=0&&t.next()}}function qs(n,e){if(n.length!=e.length)return!1;for(let t=0;t=e;i--)n[i+1]=n[i];n[e]=t}function mo(n,e){let t=-1,i=1e9;for(let s=0;s=e)return s;if(s==n.length)break;r+=n.charCodeAt(s)==9?t-r%t:1,s=Oe(n,s)}return i===!0?-1:n.length}const Ks="ͼ",go=typeof Symbol>"u"?"__"+Ks:Symbol.for(Ks),Us=typeof Symbol>"u"?"__styleSet"+Math.floor(Math.random()*1e8):Symbol("styleSet"),bo=typeof globalThis<"u"?globalThis:typeof window<"u"?window:{};class mt{constructor(e,t){this.rules=[];let{finish:i}=t||{};function s(o){return/^@/.test(o)?[o]:o.split(/,\s*/)}function r(o,l,a,h){let c=[],f=/^@(\w+)\b/.exec(o[0]),u=f&&f[1]=="keyframes";if(f&&l==null)return a.push(o[0]+";");for(let d in l){let p=l[d];if(/&/.test(d))r(d.split(/,\s*/).map(g=>o.map(b=>g.replace(/&/,b))).reduce((g,b)=>g.concat(b)),p,a);else if(p&&typeof p=="object"){if(!f)throw new RangeError("The value of a property ("+d+") should be a primitive value.");r(s(d),p,c,u)}else p!=null&&c.push(d.replace(/_.*/,"").replace(/[A-Z]/g,g=>"-"+g.toLowerCase())+": "+p+";")}(c.length||u)&&a.push((i&&!f&&!h?o.map(i):o).join(", ")+" {"+c.join(" ")+"}")}for(let o in e)r(s(o),e[o],this.rules)}getRules(){return this.rules.join(` -`)}static newName(){let e=bo[go]||1;return bo[go]=e+1,Ks+e.toString(36)}static mount(e,t){(e[Us]||new vf(e)).mount(Array.isArray(t)?t:[t])}}let Zi=null;class vf{constructor(e){if(!e.head&&e.adoptedStyleSheets&&typeof CSSStyleSheet<"u"){if(Zi)return e.adoptedStyleSheets=[Zi.sheet].concat(e.adoptedStyleSheets),e[Us]=Zi;this.sheet=new CSSStyleSheet,e.adoptedStyleSheets=[this.sheet].concat(e.adoptedStyleSheets),Zi=this}else{this.styleTag=(e.ownerDocument||e).createElement("style");let t=e.head||e;t.insertBefore(this.styleTag,t.firstChild)}this.modules=[],e[Us]=this}mount(e){let t=this.sheet,i=0,s=0;for(let r=0;r-1&&(this.modules.splice(l,1),s--,l=-1),l==-1){if(this.modules.splice(s++,0,o),t)for(let 
a=0;a",191:"?",192:"~",219:"{",220:"|",221:"}",222:'"'},yo=typeof navigator<"u"&&/Chrome\/(\d+)/.exec(navigator.userAgent),xf=typeof navigator<"u"&&/Mac/.test(navigator.platform),Sf=typeof navigator<"u"&&/MSIE \d|Trident\/(?:[7-9]|\d{2,})\..*rv:(\d+)/.exec(navigator.userAgent),Cf=xf||yo&&+yo[1]<57;for(var ue=0;ue<10;ue++)gt[48+ue]=gt[96+ue]=String(ue);for(var ue=1;ue<=24;ue++)gt[ue+111]="F"+ue;for(var ue=65;ue<=90;ue++)gt[ue]=String.fromCharCode(ue+32),Ei[ue]=String.fromCharCode(ue);for(var rs in gt)Ei.hasOwnProperty(rs)||(Ei[rs]=gt[rs]);function Af(n){var e=Cf&&(n.ctrlKey||n.altKey||n.metaKey)||Sf&&n.shiftKey&&n.key&&n.key.length==1||n.key=="Unidentified",t=!e&&n.key||(n.shiftKey?Ei:gt)[n.keyCode]||n.key||"Unidentified";return t=="Esc"&&(t="Escape"),t=="Del"&&(t="Delete"),t=="Left"&&(t="ArrowLeft"),t=="Up"&&(t="ArrowUp"),t=="Right"&&(t="ArrowRight"),t=="Down"&&(t="ArrowDown"),t}function Mn(n){let e;return n.nodeType==11?e=n.getSelection?n:n.ownerDocument:e=n,e.getSelection()}function ni(n,e){return e?n==e||n.contains(e.nodeType!=1?e.parentNode:e):!1}function Mf(n){let e=n.activeElement;for(;e&&e.shadowRoot;)e=e.shadowRoot.activeElement;return e}function bn(n,e){if(!e.anchorNode)return!1;try{return ni(n,e.anchorNode)}catch{return!1}}function Ri(n){return n.nodeType==3?si(n,0,n.nodeValue.length).getClientRects():n.nodeType==1?n.getClientRects():[]}function Dn(n,e,t,i){return t?wo(n,e,t,i,-1)||wo(n,e,t,i,1):!1}function Tn(n){for(var e=0;;e++)if(n=n.previousSibling,!n)return e}function wo(n,e,t,i,s){for(;;){if(n==t&&e==i)return!0;if(e==(s<0?0:Li(n))){if(n.nodeName=="DIV")return!1;let r=n.parentNode;if(!r||r.nodeType!=1)return!1;e=Tn(n)+(s<0?0:1),n=r}else if(n.nodeType==1){if(n=n.childNodes[e+(s<0?-1:0)],n.nodeType==1&&n.contentEditable=="false")return!1;e=s<0?Li(n):0}else return!1}}function Li(n){return n.nodeType==3?n.nodeValue.length:n.childNodes.length}const Ra={left:0,right:0,top:0,bottom:0};function Pr(n,e){let t=e?n.left:n.right;return{left:t,right:t,top:n.top,bottom:n.bottom}}function Df(n){return{left:0,right:n.innerWidth,top:0,bottom:n.innerHeight}}function Tf(n,e,t,i,s,r,o,l){let a=n.ownerDocument,h=a.defaultView||window;for(let c=n;c;)if(c.nodeType==1){let f,u=c==a.body;if(u)f=Df(h);else{if(c.scrollHeight<=c.clientHeight&&c.scrollWidth<=c.clientWidth){c=c.assignedSlot||c.parentNode;continue}let g=c.getBoundingClientRect();f={left:g.left,right:g.left+c.clientWidth,top:g.top,bottom:g.top+c.clientHeight}}let d=0,p=0;if(s=="nearest")e.top0&&e.bottom>f.bottom+p&&(p=e.bottom-f.bottom+p+o)):e.bottom>f.bottom&&(p=e.bottom-f.bottom+o,t<0&&e.top-p0&&e.right>f.right+d&&(d=e.right-f.right+d+r)):e.right>f.right&&(d=e.right-f.right+r,t<0&&e.leftt)return f.domBoundsAround(e,t,h);if(u>=e&&s==-1&&(s=a,r=h),h>t&&f.dom.parentNode==this.dom){o=a,l=c;break}c=u,h=u+f.breakAfter}return{from:r,to:l<0?i+this.length:l,startDOM:(s?this.children[s-1].dom.nextSibling:null)||this.dom.firstChild,endDOM:o=0?this.children[o].dom:null}}markDirty(e=!1){this.dirty|=2,this.markParentsDirty(e)}markParentsDirty(e){for(let t=this.parent;t;t=t.parent){if(e&&(t.dirty|=2),t.dirty&1)return;t.dirty|=1,e=!1}}setParent(e){this.parent!=e&&(this.parent=e,this.dirty&&this.markParentsDirty(!0))}setDOM(e){this.dom&&(this.dom.cmView=null),this.dom=e,e.cmView=this}get rootView(){for(let e=this;;){let t=e.parent;if(!t)return e;e=t}}replaceChildren(e,t,i=Er){this.markDirty();for(let s=e;sthis.pos||e==this.pos&&(t>0||this.i==0||this.children[this.i-1].breakAfter))return this.off=e-this.pos,this;let 
i=this.children[--this.i];this.pos-=i.length+i.breakAfter}}}function Na(n,e,t,i,s,r,o,l,a){let{children:h}=n,c=h.length?h[e]:null,f=r.length?r[r.length-1]:null,u=f?f.breakAfter:o;if(!(e==i&&c&&!o&&!u&&r.length<2&&c.merge(t,s,r.length?f:null,t==0,l,a))){if(i0&&(!o&&r.length&&c.merge(t,c.length,r[0],!1,l,0)?c.breakAfter=r.shift().breakAfter:(t2);var M={mac:Co||/Mac/.test(Te.platform),windows:/Win/.test(Te.platform),linux:/Linux|X11/.test(Te.platform),ie:jn,ie_version:Fa?Gs.documentMode||6:Ys?+Ys[1]:Js?+Js[1]:0,gecko:xo,gecko_version:xo?+(/Firefox\/(\d+)/.exec(Te.userAgent)||[0,0])[1]:0,chrome:!!os,chrome_version:os?+os[1]:0,ios:Co,android:/Android\b/.test(Te.userAgent),webkit:So,safari:Ha,webkit_version:So?+(/\bAppleWebKit\/(\d+)/.exec(navigator.userAgent)||[0,0])[1]:0,tabSize:Gs.documentElement.style.tabSize!=null?"tab-size":"-moz-tab-size"};const Ef=256;class bt extends K{constructor(e){super(),this.text=e}get length(){return this.text.length}createDOM(e){this.setDOM(e||document.createTextNode(this.text))}sync(e){this.dom||this.createDOM(),this.dom.nodeValue!=this.text&&(e&&e.node==this.dom&&(e.written=!0),this.dom.nodeValue=this.text)}reuseDOM(e){e.nodeType==3&&this.createDOM(e)}merge(e,t,i){return i&&(!(i instanceof bt)||this.length-(t-e)+i.length>Ef)?!1:(this.text=this.text.slice(0,e)+(i?i.text:"")+this.text.slice(t),this.markDirty(),!0)}split(e){let t=new bt(this.text.slice(e));return this.text=this.text.slice(0,e),this.markDirty(),t}localPosFromDOM(e,t){return e==this.dom?t:t?this.text.length:0}domAtPos(e){return new be(this.dom,e)}domBoundsAround(e,t,i){return{from:i,to:i+this.length,startDOM:this.dom,endDOM:this.dom.nextSibling}}coordsAt(e,t){return Xs(this.dom,e,t)}}class et extends K{constructor(e,t=[],i=0){super(),this.mark=e,this.children=t,this.length=i;for(let s of t)s.setParent(this)}setAttrs(e){if(Ia(e),this.mark.class&&(e.className=this.mark.class),this.mark.attrs)for(let t in this.mark.attrs)e.setAttribute(t,this.mark.attrs[t]);return e}reuseDOM(e){e.nodeName==this.mark.tagName.toUpperCase()&&(this.setDOM(e),this.dirty|=6)}sync(e){this.dom?this.dirty&4&&this.setAttrs(this.dom):this.setDOM(this.setAttrs(document.createElement(this.mark.tagName))),super.sync(e)}merge(e,t,i,s,r,o){return i&&(!(i instanceof et&&i.mark.eq(this.mark))||e&&r<=0||te&&t.push(i=e&&(s=r),i=a,r++}let o=this.length-e;return this.length=e,s>-1&&(this.children.length=s,this.markDirty()),new et(this.mark,t,o)}domAtPos(e){return qa(this,e)}coordsAt(e,t){return Ka(this,e,t)}}function Xs(n,e,t){let i=n.nodeValue.length;e>i&&(e=i);let s=e,r=e,o=0;e==0&&t<0||e==i&&t>=0?M.chrome||M.gecko||(e?(s--,o=1):r=0)?0:l.length-1];return M.safari&&!o&&a.width==0&&(a=Array.prototype.find.call(l,h=>h.width)||a),o?Pr(a,o<0):a||null}class ct extends K{constructor(e,t,i){super(),this.widget=e,this.length=t,this.side=i,this.prevWidget=null}static create(e,t,i){return new(e.customView||ct)(e,t,i)}split(e){let t=ct.create(this.widget,this.length-e,this.side);return this.length-=e,t}sync(){(!this.dom||!this.widget.updateDOM(this.dom))&&(this.dom&&this.prevWidget&&this.prevWidget.destroy(this.dom),this.prevWidget=null,this.setDOM(this.widget.toDOM(this.editorView)),this.dom.contentEditable="false")}getSide(){return this.side}merge(e,t,i,s,r,o){return i&&(!(i instanceof ct)||!this.widget.compare(i.widget)||e>0&&r<=0||t0?i.length-1:0;s=i[r],!(e>0?r==0:r==i.length-1||s.top0?-1:1);return this.length?s:Pr(s,this.side>0)}get isEditable(){return!1}destroy(){super.destroy(),this.dom&&this.widget.destroy(this.dom)}}class Wa extends 
ct{domAtPos(e){let{topView:t,text:i}=this.widget;return t?Zs(e,0,t,i,(s,r)=>s.domAtPos(r),s=>new be(i,Math.min(s,i.nodeValue.length))):new be(i,Math.min(e,i.nodeValue.length))}sync(){this.setDOM(this.widget.toDOM())}localPosFromDOM(e,t){let{topView:i,text:s}=this.widget;return i?za(e,t,i,s):Math.min(t,this.length)}ignoreMutation(){return!1}get overrideDOMText(){return null}coordsAt(e,t){let{topView:i,text:s}=this.widget;return i?Zs(e,t,i,s,(r,o,l)=>r.coordsAt(o,l),(r,o)=>Xs(s,r,o)):Xs(s,e,t)}destroy(){var e;super.destroy(),(e=this.widget.topView)===null||e===void 0||e.destroy()}get isEditable(){return!0}canReuseDOM(){return!0}}function Zs(n,e,t,i,s,r){if(t instanceof et){for(let o=t.dom.firstChild;o;o=o.nextSibling){let l=K.get(o);if(!l)return r(n,e);let a=ni(o,i),h=l.length+(a?i.nodeValue.length:0);if(n0?-1:1);return i&&i.topt.top?{left:t.left,right:t.right,top:i.top,bottom:i.bottom}:t}get overrideDOMText(){return N.empty}}bt.prototype.children=ct.prototype.children=ri.prototype.children=Er;function Rf(n,e){let t=n.parent,i=t?t.children.indexOf(n):-1;for(;t&&i>=0;)if(e<0?i>0:ir&&e0;r--){let o=i[r-1];if(o.dom.parentNode==t)return o.domAtPos(o.length)}for(let r=s;r0&&e instanceof et&&s.length&&(i=s[s.length-1])instanceof et&&i.mark.eq(e.mark)?ja(i,e.children[0],t-1):(s.push(e),e.setParent(n)),n.length+=e.length}function Ka(n,e,t){let i=null,s=-1,r=null,o=-1;function l(h,c){for(let f=0,u=0;f=c&&(d.children.length?l(d,c-u):!r&&(p>c||u==p&&d.getSide()>0)?(r=d,o=c-u):(u0?3e8:-4e8:t>0?1e8:-1e8,new _t(e,t,t,i,e.widget||null,!1)}static replace(e){let t=!!e.block,i,s;if(e.isBlockGap)i=-5e8,s=4e8;else{let{start:r,end:o}=Ua(e,t);i=(r?t?-3e8:-1:5e8)-1,s=(o?t?2e8:1:-6e8)+1}return new _t(e,i,s,t,e.widget||null,!0)}static line(e){return new qi(e)}static set(e,t=!1){return H.of(e,t)}hasHeight(){return this.widget?this.widget.estimatedHeight>-1:!1}}E.none=H.empty;class Kn extends E{constructor(e){let{start:t,end:i}=Ua(e);super(t?-1:5e8,i?1:-6e8,null,e),this.tagName=e.tagName||"span",this.class=e.class||"",this.attrs=e.attributes||null}eq(e){return this==e||e instanceof Kn&&this.tagName==e.tagName&&this.class==e.class&&Rr(this.attrs,e.attrs)}range(e,t=e){if(e>=t)throw new RangeError("Mark decorations may not be empty");return super.range(e,t)}}Kn.prototype.point=!1;class qi extends E{constructor(e){super(-2e8,-2e8,null,e)}eq(e){return e instanceof qi&&Rr(this.spec.attributes,e.spec.attributes)}range(e,t=e){if(t!=e)throw new RangeError("Line decoration ranges must be zero-length");return super.range(e,t)}}qi.prototype.mapMode=de.TrackBefore;qi.prototype.point=!0;class _t extends E{constructor(e,t,i,s,r,o){super(t,i,r,e),this.block=s,this.isReplace=o,this.mapMode=s?t<=0?de.TrackBefore:de.TrackAfter:de.TrackDel}get type(){return this.startSide=5}eq(e){return e instanceof _t&&If(this.widget,e.widget)&&this.block==e.block&&this.startSide==e.startSide&&this.endSide==e.endSide}range(e,t=e){if(this.isReplace&&(e>t||e==t&&this.startSide>0&&this.endSide<=0))throw new RangeError("Invalid range for replacement decoration");if(!this.isReplace&&t!=e)throw new RangeError("Widget decorations can only have zero-length ranges");return super.range(e,t)}}_t.prototype.point=!0;function Ua(n,e=!1){let{inclusiveStart:t,inclusiveEnd:i}=n;return t==null&&(t=n.inclusive),i==null&&(i=n.inclusive),{start:t??e,end:i??e}}function If(n,e){return n==e||!!(n&&e&&n.compare(e))}function er(n,e,t,i=0){let s=t.length-1;s>=0&&t[s]+i>=n?t[s]=Math.max(t[s],e):t.push(n,e)}class ke extends 
K{constructor(){super(...arguments),this.children=[],this.length=0,this.prevAttrs=void 0,this.attrs=null,this.breakAfter=0}merge(e,t,i,s,r,o){if(i){if(!(i instanceof ke))return!1;this.dom||i.transferDOM(this)}return s&&this.setDeco(i?i.attrs:null),Va(this,e,t,i?i.children:[],r,o),!0}split(e){let t=new ke;if(t.breakAfter=this.breakAfter,this.length==0)return t;let{i,off:s}=this.childPos(e);s&&(t.append(this.children[i].split(s),0),this.children[i].merge(s,this.children[i].length,null,!1,0,0),i++);for(let r=i;r0&&this.children[i-1].length==0;)this.children[--i].destroy();return this.children.length=i,this.markDirty(),this.length=e,t}transferDOM(e){this.dom&&(this.markDirty(),e.setDOM(this.dom),e.prevAttrs=this.prevAttrs===void 0?this.attrs:this.prevAttrs,this.prevAttrs=void 0,this.dom=null)}setDeco(e){Rr(this.attrs,e)||(this.dom&&(this.prevAttrs=this.attrs,this.markDirty()),this.attrs=e)}append(e,t){ja(this,e,t)}addLineDeco(e){let t=e.spec.attributes,i=e.spec.class;t&&(this.attrs=Qs(t,this.attrs||{})),i&&(this.attrs=Qs({class:i},this.attrs||{}))}domAtPos(e){return qa(this,e)}reuseDOM(e){e.nodeName=="DIV"&&(this.setDOM(e),this.dirty|=6)}sync(e){var t;this.dom?this.dirty&4&&(Ia(this.dom),this.dom.className="cm-line",this.prevAttrs=this.attrs?null:void 0):(this.setDOM(document.createElement("div")),this.dom.className="cm-line",this.prevAttrs=this.attrs?null:void 0),this.prevAttrs!==void 0&&($s(this.dom,this.prevAttrs,this.attrs),this.dom.classList.add("cm-line"),this.prevAttrs=void 0),super.sync(e);let i=this.dom.lastChild;for(;i&&K.get(i)instanceof et;)i=i.lastChild;if(!i||!this.length||i.nodeName!="BR"&&((t=K.get(i))===null||t===void 0?void 0:t.isEditable)==!1&&(!M.ios||!this.children.some(s=>s instanceof bt))){let s=document.createElement("BR");s.cmIgnore=!0,this.dom.appendChild(s)}}measureTextSize(){if(this.children.length==0||this.length>20)return null;let e=0;for(let t of this.children){if(!(t instanceof bt)||/[^ -~]/.test(t.text))return null;let i=Ri(t.dom);if(i.length!=1)return null;e+=i[0].width}return e?{lineHeight:this.dom.getBoundingClientRect().height,charWidth:e/this.length}:null}coordsAt(e,t){return Ka(this,e,t)}become(e){return!1}get type(){return z.Text}static find(e,t){for(let i=0,s=0;i=t){if(r instanceof ke)return r;if(o>t)break}s=o+r.breakAfter}return null}}class Et extends K{constructor(e,t,i){super(),this.widget=e,this.length=t,this.type=i,this.breakAfter=0,this.prevWidget=null}merge(e,t,i,s,r,o){return i&&(!(i instanceof Et)||!this.widget.compare(i.widget)||e>0&&r<=0||t0;){if(this.textOff==this.text.length){let{value:r,lineBreak:o,done:l}=this.cursor.next(this.skip);if(this.skip=0,l)throw new Error("Ran out of text content when drawing inline views");if(o){this.posCovered()||this.getLine(),this.content.length?this.content[this.content.length-1].breakAfter=1:this.breakAtStart=1,this.flushBuffer([]),this.curLine=null,e--;continue}else this.text=r,this.textOff=0}let s=Math.min(this.text.length-this.textOff,e,512);this.flushBuffer(t.slice(0,i)),this.getLine().append(Qi(new bt(this.text.slice(this.textOff,this.textOff+s)),t),i),this.atCursorPos=!0,this.textOff+=s,e-=s,i=0}}span(e,t,i,s){this.buildText(t-e,i,s),this.pos=t,this.openStart<0&&(this.openStart=s)}point(e,t,i,s,r,o){if(this.disallowBlockEffectsFor[o]&&i instanceof _t){if(i.block)throw new RangeError("Block decorations may not be specified via plugins");if(t>this.doc.lineAt(this.pos).to)throw new RangeError("Decorations that replace line breaks may not be specified via plugins")}let l=t-e;if(i instanceof 
_t)if(i.block){let{type:a}=i;a==z.WidgetAfter&&!this.posCovered()&&this.getLine(),this.addBlockWidget(new Et(i.widget||new Ao("div"),l,a))}else{let a=ct.create(i.widget||new Ao("span"),l,l?0:i.startSide),h=this.atCursorPos&&!a.isEditable&&r<=s.length&&(e0),c=!a.isEditable&&(en.some(e=>e)}),$a=O.define({combine:n=>n.some(e=>e)});class On{constructor(e,t="nearest",i="nearest",s=5,r=5){this.range=e,this.y=t,this.x=i,this.yMargin=s,this.xMargin=r}map(e){return e.empty?this:new On(this.range.map(e),this.y,this.x,this.yMargin,this.xMargin)}}const Mo=R.define({map:(n,e)=>n.map(e)});function He(n,e,t){let i=n.facet(Xa);i.length?i[0](e):window.onerror?window.onerror(String(e),t,void 0,void 0,e):t?console.error(t+":",e):console.error(e)}const Un=O.define({combine:n=>n.length?n[0]:!0});let _f=0;const ki=O.define();class ye{constructor(e,t,i,s){this.id=e,this.create=t,this.domEventHandlers=i,this.extension=s(this)}static define(e,t){const{eventHandlers:i,provide:s,decorations:r}=t||{};return new ye(_f++,e,i,o=>{let l=[ki.of(o)];return r&&l.push(Ii.of(a=>{let h=a.plugin(o);return h?r(h):E.none})),s&&l.push(s(o)),l})}static fromClass(e,t){return ye.define(i=>new e(i),t)}}class ls{constructor(e){this.spec=e,this.mustUpdate=null,this.value=null}update(e){if(this.value){if(this.mustUpdate){let t=this.mustUpdate;if(this.mustUpdate=null,this.value.update)try{this.value.update(t)}catch(i){if(He(t.state,i,"CodeMirror plugin crashed"),this.value.destroy)try{this.value.destroy()}catch{}this.deactivate()}}}else if(this.spec)try{this.value=this.spec.create(e)}catch(t){He(e.state,t,"CodeMirror plugin crashed"),this.deactivate()}return this}destroy(e){var t;if(!((t=this.value)===null||t===void 0)&&t.destroy)try{this.value.destroy()}catch(i){He(e.state,i,"CodeMirror plugin crashed")}}deactivate(){this.spec=this.value=null}}const eh=O.define(),th=O.define(),Ii=O.define(),ih=O.define(),nh=O.define(),vi=O.define();class $e{constructor(e,t,i,s){this.fromA=e,this.toA=t,this.fromB=i,this.toB=s}join(e){return new $e(Math.min(this.fromA,e.fromA),Math.max(this.toA,e.toA),Math.min(this.fromB,e.fromB),Math.max(this.toB,e.toB))}addToSet(e){let t=e.length,i=this;for(;t>0;t--){let s=e[t-1];if(!(s.fromA>i.toA)){if(s.toAc)break;r+=2}if(!a)return i;new $e(a.fromA,a.toA,a.fromB,a.toB).addToSet(i),o=a.toA,l=a.toB}}}class Bn{constructor(e,t,i){this.view=e,this.state=t,this.transactions=i,this.flags=0,this.startState=e.state,this.changes=ne.empty(this.startState.doc.length);for(let o of i)this.changes=this.changes.compose(o.changes);let s=[];this.changes.iterChangedRanges((o,l,a,h)=>s.push(new $e(o,l,a,h))),this.changedRanges=s;let r=e.hasFocus;r!=e.inputState.notifiedFocused&&(e.inputState.notifiedFocused=r,this.flags|=1)}static create(e,t,i){return new Bn(e,t,i)}get viewportChanged(){return(this.flags&4)>0}get heightChanged(){return(this.flags&2)>0}get geometryChanged(){return this.docChanged||(this.flags&10)>0}get focusChanged(){return(this.flags&1)>0}get docChanged(){return!this.changes.empty}get selectionSet(){return this.transactions.some(e=>e.selection)}get empty(){return this.flags==0&&this.transactions.length==0}}var Q=function(n){return n[n.LTR=0]="LTR",n[n.RTL=1]="RTL",n}(Q||(Q={}));const ir=Q.LTR,Nf=Q.RTL;function sh(n){let e=[];for(let t=0;t=t){if(l.level==i)return o;(r<0||(s!=0?s<0?l.fromt:e[r].level>l.level))&&(r=o)}}if(r<0)throw new RangeError("Index out of range");return r}}const Z=[];function zf(n,e){let t=n.length,i=e==ir?1:2,s=e==ir?2:1;if(!n||i==1&&!Wf.test(n))return rh(t);for(let 
o=0,l=i,a=i;o=0;u-=3)if(ze[u+1]==-c){let d=ze[u+2],p=d&2?i:d&4?d&1?s:i:0;p&&(Z[o]=Z[ze[u]]=p),l=u;break}}else{if(ze.length==189)break;ze[l++]=o,ze[l++]=h,ze[l++]=a}else if((f=Z[o])==2||f==1){let u=f==i;a=u?0:1;for(let d=l-3;d>=0;d-=3){let p=ze[d+2];if(p&2)break;if(u)ze[d+2]|=2;else{if(p&4)break;ze[d+2]|=4}}}for(let o=0;ol;){let c=h,f=Z[--h]!=2;for(;h>l&&f==(Z[h-1]!=2);)h--;r.push(new $t(h,c,f?2:1))}else r.push(new $t(l,o,0))}else for(let o=0;o1)for(let a of this.points)a.node==e&&a.pos>this.text.length&&(a.pos-=o-1);i=r+o}}readNode(e){if(e.cmIgnore)return;let t=K.get(e),i=t&&t.overrideDOMText;if(i!=null){this.findPointInside(e,i.length);for(let s=i.iter();!s.next().done;)s.lineBreak?this.lineBreak():this.append(s.value)}else e.nodeType==3?this.readTextNode(e):e.nodeName=="BR"?e.nextSibling&&this.lineBreak():e.nodeType==1&&this.readRange(e.firstChild,null)}findPointBefore(e,t){for(let i of this.points)i.node==e&&e.childNodes[i.offset]==t&&(i.pos=this.text.length)}findPointInside(e,t){for(let i of this.points)(e.nodeType==3?i.node==e:e.contains(i.node))&&(i.pos=this.text.length+Math.min(t,i.offset))}}function Do(n){return n.nodeType==1&&/^(DIV|P|LI|UL|OL|BLOCKQUOTE|DD|DT|H\d|SECTION|PRE)$/.test(n.nodeName)}class To{constructor(e,t){this.node=e,this.offset=t,this.pos=-1}}class Oo extends K{constructor(e){super(),this.view=e,this.compositionDeco=E.none,this.decorations=[],this.dynamicDecorationMap=[],this.minWidth=0,this.minWidthFrom=0,this.minWidthTo=0,this.impreciseAnchor=null,this.impreciseHead=null,this.forceSelection=!1,this.lastUpdate=Date.now(),this.setDOM(e.contentDOM),this.children=[new ke],this.children[0].setParent(this),this.updateDeco(),this.updateInner([new $e(0,0,0,e.state.doc.length)],0)}get editorView(){return this.view}get length(){return this.view.state.doc.length}update(e){let t=e.changedRanges;this.minWidth>0&&t.length&&(t.every(({fromA:o,toA:l})=>lthis.minWidthTo)?(this.minWidthFrom=e.changes.mapPos(this.minWidthFrom,1),this.minWidthTo=e.changes.mapPos(this.minWidthTo,1)):this.minWidth=this.minWidthFrom=this.minWidthTo=0),this.view.inputState.composing<0?this.compositionDeco=E.none:(e.transactions.length||this.dirty)&&(this.compositionDeco=Kf(this.view,e.changes)),(M.ie||M.chrome)&&!this.compositionDeco.size&&e&&e.state.doc.lines!=e.startState.doc.lines&&(this.forceSelection=!0);let i=this.decorations,s=this.updateDeco(),r=Yf(i,s,e.changes);return t=$e.extendWithRanges(t,r),this.dirty==0&&t.length==0?!1:(this.updateInner(t,e.startState.doc.length),e.transactions.length&&(this.lastUpdate=Date.now()),!0)}updateInner(e,t){this.view.viewState.mustMeasureContent=!0,this.updateChildren(e,t);let{observer:i}=this.view;i.ignore(()=>{this.dom.style.height=this.view.viewState.contentHeight+"px",this.dom.style.flexBasis=this.minWidth?this.minWidth+"px":"";let r=M.chrome||M.ios?{node:i.selectionRange.focusNode,written:!1}:void 0;this.sync(r),this.dirty=0,r&&(r.written||i.selectionRange.focusNode!=r.node)&&(this.forceSelection=!0),this.dom.style.height=""});let s=[];if(this.view.viewport.from||this.view.viewport.to=0?e[s]:null;if(!r)break;let{fromA:o,toA:l,fromB:a,toB:h}=r,{content:c,breakAtStart:f,openStart:u,openEnd:d}=Lr.build(this.view.state.doc,a,h,this.decorations,this.dynamicDecorationMap),{i:p,off:g}=i.findPos(l,1),{i:b,off:w}=i.findPos(o,-1);Na(this,b,w,p,g,c,f,u,d)}}updateSelection(e=!1,t=!1){if((e||!this.view.observer.selectionRange.focusNode)&&this.view.observer.readSelectionRange(),!(t||this.mayControlSelection()))return;let i=this.forceSelection;this.forceSelection=!1;let 
s=this.view.state.selection.main,r=this.domAtPos(s.anchor),o=s.empty?r:this.domAtPos(s.head);if(M.gecko&&s.empty&&jf(r)){let a=document.createTextNode("");this.view.observer.ignore(()=>r.node.insertBefore(a,r.node.childNodes[r.offset]||null)),r=o=new be(a,0),i=!0}let l=this.view.observer.selectionRange;(i||!l.focusNode||!Dn(r.node,r.offset,l.anchorNode,l.anchorOffset)||!Dn(o.node,o.offset,l.focusNode,l.focusOffset))&&(this.view.observer.ignore(()=>{M.android&&M.chrome&&this.dom.contains(l.focusNode)&&Xf(l.focusNode,this.dom)&&(this.dom.blur(),this.dom.focus({preventScroll:!0}));let a=Mn(this.view.root);if(a)if(s.empty){if(M.gecko){let h=Gf(r.node,r.offset);if(h&&h!=3){let c=hh(r.node,r.offset,h==1?1:-1);c&&(r=new be(c,h==1?0:c.nodeValue.length))}}a.collapse(r.node,r.offset),s.bidiLevel!=null&&l.cursorBidiLevel!=null&&(l.cursorBidiLevel=s.bidiLevel)}else if(a.extend){a.collapse(r.node,r.offset);try{a.extend(o.node,o.offset)}catch{}}else{let h=document.createRange();s.anchor>s.head&&([r,o]=[o,r]),h.setEnd(o.node,o.offset),h.setStart(r.node,r.offset),a.removeAllRanges(),a.addRange(h)}}),this.view.observer.setSelectionRange(r,o)),this.impreciseAnchor=r.precise?null:new be(l.anchorNode,l.anchorOffset),this.impreciseHead=o.precise?null:new be(l.focusNode,l.focusOffset)}enforceCursorAssoc(){if(this.compositionDeco.size)return;let{view:e}=this,t=e.state.selection.main,i=Mn(e.root),{anchorNode:s,anchorOffset:r}=e.observer.selectionRange;if(!i||!t.empty||!t.assoc||!i.modify)return;let o=ke.find(this,t.head);if(!o)return;let l=o.posAtStart;if(t.head==l||t.head==l+o.length)return;let a=this.coordsAt(t.head,-1),h=this.coordsAt(t.head,1);if(!a||!h||a.bottom>h.top)return;let c=this.domAtPos(t.head+t.assoc);i.collapse(c.node,c.offset),i.modify("move",t.assoc<0?"forward":"backward","lineboundary"),e.observer.readSelectionRange();let f=e.observer.selectionRange;e.docView.posFromDOM(f.anchorNode,f.anchorOffset)!=t.from&&i.collapse(s,r)}mayControlSelection(){let e=this.view.root.activeElement;return e==this.dom||bn(this.dom,this.view.observer.selectionRange)&&!(e&&this.dom.contains(e))}nearest(e){for(let t=e;t;){let i=K.get(t);if(i&&i.rootView==this)return i;t=t.parentNode}return null}posFromDOM(e,t){let i=this.nearest(e);if(!i)throw new RangeError("Trying to find position for a DOM position outside of the document");return i.localPosFromDOM(e,t)+i.posAtStart}domAtPos(e){let{i:t,off:i}=this.childCursor().findPos(e,-1);for(;to||e==o&&r.type!=z.WidgetBefore&&r.type!=z.WidgetAfter&&(!s||t==2||this.children[s-1].breakAfter||this.children[s-1].type==z.WidgetBefore&&t>-2))return r.coordsAt(e-o,t);i=o}}measureVisibleLineHeights(e){let t=[],{from:i,to:s}=e,r=this.view.contentDOM.clientWidth,o=r>Math.max(this.view.scrollDOM.clientWidth,this.minWidth)+1,l=-1,a=this.view.textDirection==Q.LTR;for(let h=0,c=0;cs)break;if(h>=i){let d=f.dom.getBoundingClientRect();if(t.push(d.height),o){let p=f.dom.lastChild,g=p?Ri(p):[];if(g.length){let b=g[g.length-1],w=a?b.right-d.left:d.right-b.left;w>l&&(l=w,this.minWidth=r,this.minWidthFrom=h,this.minWidthTo=u)}}}h=u+f.breakAfter}return t}textDirectionAt(e){let{i:t}=this.childPos(e,1);return getComputedStyle(this.children[t].dom).direction=="rtl"?Q.RTL:Q.LTR}measureTextSize(){for(let s of this.children)if(s instanceof ke){let r=s.measureTextSize();if(r)return r}let e=document.createElement("div"),t,i;return e.className="cm-line",e.style.width="99999px",e.textContent="abc def ghi jkl mno pqr stu",this.view.observer.ignore(()=>{this.dom.appendChild(e);let 
s=Ri(e.firstChild)[0];t=e.getBoundingClientRect().height,i=s?s.width/27:7,e.remove()}),{lineHeight:t,charWidth:i}}childCursor(e=this.length){let t=this.children.length;return t&&(e-=this.children[--t].length),new _a(this.children,e,t)}computeBlockGapDeco(){let e=[],t=this.view.viewState;for(let i=0,s=0;;s++){let r=s==t.viewports.length?null:t.viewports[s],o=r?r.from-1:this.length;if(o>i){let l=t.lineBlockAt(o).bottom-t.lineBlockAt(i).top;e.push(E.replace({widget:new Bo(l),block:!0,inclusive:!0,isBlockGap:!0}).range(i,o))}if(!r)break;i=r.to+1}return E.set(e)}updateDeco(){let e=this.view.state.facet(Ii).map((t,i)=>(this.dynamicDecorationMap[i]=typeof t=="function")?t(this.view):t);for(let t=e.length;tt.anchor?-1:1),s;if(!i)return;!t.empty&&(s=this.coordsAt(t.anchor,t.anchor>t.head?-1:1))&&(i={left:Math.min(i.left,s.left),top:Math.min(i.top,s.top),right:Math.max(i.right,s.right),bottom:Math.max(i.bottom,s.bottom)});let r=0,o=0,l=0,a=0;for(let c of this.view.state.facet(nh).map(f=>f(this.view)))if(c){let{left:f,right:u,top:d,bottom:p}=c;f!=null&&(r=Math.max(r,f)),u!=null&&(o=Math.max(o,u)),d!=null&&(l=Math.max(l,d)),p!=null&&(a=Math.max(a,p))}let h={left:i.left-r,top:i.top-l,right:i.right+o,bottom:i.bottom+a};Tf(this.view.scrollDOM,h,t.head0&&t<=0)n=n.childNodes[e-1],e=Li(n);else if(n.nodeType==1&&e=0)n=n.childNodes[e],e=0;else return null}}function Gf(n,e){return n.nodeType!=1?0:(e&&n.childNodes[e-1].contentEditable=="false"?1:0)|(e0;){let h=Oe(s.text,o,!1);if(i(s.text.slice(h,o))!=a)break;o=h}for(;ln?e.left-n:Math.max(0,n-e.right)}function $f(n,e){return e.top>n?e.top-n:Math.max(0,n-e.bottom)}function as(n,e){return n.tope.top+1}function Po(n,e){return en.bottom?{top:n.top,left:n.left,right:n.right,bottom:e}:n}function sr(n,e,t){let i,s,r,o,l=!1,a,h,c,f;for(let p=n.firstChild;p;p=p.nextSibling){let g=Ri(p);for(let b=0;bS||o==S&&r>y)&&(i=p,s=w,r=y,o=S,l=!y||(y>0?b0)),y==0?t>w.bottom&&(!c||c.bottomw.top)&&(h=p,f=w):c&&as(c,w)?c=Eo(c,w.bottom):f&&as(f,w)&&(f=Po(f,w.top))}}if(c&&c.bottom>=t?(i=a,s=c):f&&f.top<=t&&(i=h,s=f),!i)return{node:n,offset:0};let u=Math.max(s.left,Math.min(s.right,e));if(i.nodeType==3)return Ro(i,u,t);if(l&&i.contentEditable!="false")return sr(i,u,t);let d=Array.prototype.indexOf.call(n.childNodes,i)+(e>=(s.left+s.right)/2?1:0);return{node:n,offset:d}}function Ro(n,e,t){let i=n.nodeValue.length,s=-1,r=1e9,o=0;for(let l=0;lt?c.top-t:t-c.bottom)-1;if(c.left-1<=e&&c.right+1>=e&&f=(c.left+c.right)/2,d=u;if((M.chrome||M.gecko)&&si(n,l).getBoundingClientRect().left==c.right&&(d=!u),f<=0)return{node:n,offset:l+(d?1:0)};s=l+(d?1:0),r=f}}}return{node:n,offset:s>-1?s:o>0?n.nodeValue.length:0}}function ch(n,{x:e,y:t},i,s=-1){var r;let o=n.contentDOM.getBoundingClientRect(),l=o.top+n.viewState.paddingTop,a,{docHeight:h}=n.viewState,c=t-l;if(c<0)return 0;if(c>h)return n.state.doc.length;for(let w=n.defaultLineHeight/2,y=!1;a=n.elementAtHeight(c),a.type!=z.Text;)for(;c=s>0?a.bottom+w:a.top-w,!(c>=0&&c<=h);){if(y)return i?null:0;y=!0,s=-s}t=l+c;let f=a.from;if(fn.viewport.to)return n.viewport.to==n.state.doc.length?n.state.doc.length:i?null:Lo(n,o,a,e,t);let u=n.dom.ownerDocument,d=n.root.elementFromPoint?n.root:u,p=d.elementFromPoint(e,t);p&&!n.contentDOM.contains(p)&&(p=null),p||(e=Math.max(o.left+1,Math.min(o.right-1,e)),p=d.elementFromPoint(e,t),p&&!n.contentDOM.contains(p)&&(p=null));let g,b=-1;if(p&&((r=n.docView.nearest(p))===null||r===void 0?void 0:r.isEditable)!=!1){if(u.caretPositionFromPoint){let w=u.caretPositionFromPoint(e,t);w&&({offsetNode:g,offset:b}=w)}else 
if(u.caretRangeFromPoint){let w=u.caretRangeFromPoint(e,t);w&&({startContainer:g,startOffset:b}=w,(!n.contentDOM.contains(g)||M.safari&&eu(g,b,e)||M.chrome&&tu(g,b,e))&&(g=void 0))}}if(!g||!n.docView.dom.contains(g)){let w=ke.find(n.docView,f);if(!w)return c>a.top+a.height/2?a.to:a.from;({node:g,offset:b}=sr(w.dom,e,t))}return n.docView.posFromDOM(g,b)}function Lo(n,e,t,i,s){let r=Math.round((i-e.left)*n.defaultCharacterWidth);if(n.lineWrapping&&t.height>n.defaultLineHeight*1.5){let l=Math.floor((s-t.top)/n.defaultLineHeight);r+=l*n.viewState.heightOracle.lineLength}let o=n.state.sliceDoc(t.from,t.to);return t.from+js(o,r,n.state.tabSize)}function eu(n,e,t){let i;if(n.nodeType!=3||e!=(i=n.nodeValue.length))return!1;for(let s=n.nextSibling;s;s=s.nextSibling)if(s.nodeType!=1||s.nodeName!="BR")return!1;return si(n,i-1,i).getBoundingClientRect().left>t}function tu(n,e,t){if(e!=0)return!1;for(let s=n;;){let r=s.parentNode;if(!r||r.nodeType!=1||r.firstChild!=s)return!1;if(r.classList.contains("cm-line"))break;s=r}let i=n.nodeType==1?n.getBoundingClientRect():si(n,0,Math.max(n.nodeValue.length,1)).getBoundingClientRect();return t-i.left>5}function iu(n,e,t,i){let s=n.state.doc.lineAt(e.head),r=!i||!n.lineWrapping?null:n.coordsAtPos(e.assoc<0&&e.head>s.from?e.head-1:e.head);if(r){let a=n.dom.getBoundingClientRect(),h=n.textDirectionAt(s.from),c=n.posAtCoords({x:t==(h==Q.LTR)?a.right-1:a.left+1,y:(r.top+r.bottom)/2});if(c!=null)return k.cursor(c,t?-1:1)}let o=ke.find(n.docView,e.head),l=o?t?o.posAtEnd:o.posAtStart:t?s.to:s.from;return k.cursor(l,t?-1:1)}function Io(n,e,t,i){let s=n.state.doc.lineAt(e.head),r=n.bidiSpans(s),o=n.textDirectionAt(s.from);for(let l=e,a=null;;){let h=qf(s,r,o,l,t),c=oh;if(!h){if(s.number==(t?n.state.doc.lines:1))return l;c=` -`,s=n.state.doc.line(s.number+(t?1:-1)),r=n.bidiSpans(s),h=k.cursor(t?s.from:s.to)}if(a){if(!a(c))return l}else{if(!i)return h;a=i(c)}l=h}}function nu(n,e,t){let i=n.state.charCategorizer(e),s=i(t);return r=>{let o=i(r);return s==Re.Space&&(s=o),s==o}}function su(n,e,t,i){let s=e.head,r=t?1:-1;if(s==(t?n.state.doc.length:0))return k.cursor(s,e.assoc);let o=e.goalColumn,l,a=n.contentDOM.getBoundingClientRect(),h=n.coordsAtPos(s),c=n.documentTop;if(h)o==null&&(o=h.left-a.left),l=r<0?h.top:h.bottom;else{let d=n.viewState.lineBlockAt(s);o==null&&(o=Math.min(a.right-a.left,n.defaultCharacterWidth*(s-d.from))),l=(r<0?d.top:d.bottom)+c}let f=a.left+o,u=i??n.defaultLineHeight>>1;for(let d=0;;d+=10){let p=l+(u+d)*r,g=ch(n,{x:f,y:p},!1,r);if(pa.bottom||(r<0?gs))return k.cursor(g,e.assoc,void 0,o)}}function hs(n,e,t){let i=n.state.facet(ih).map(s=>s(n));for(;;){let s=!1;for(let r of i)r.between(t.from-1,t.from+1,(o,l,a)=>{t.from>o&&t.fromt.from?k.cursor(o,1):k.cursor(l,-1),s=!0)});if(!s)return t}}class ru{constructor(e){this.lastKeyCode=0,this.lastKeyTime=0,this.lastTouchTime=0,this.lastFocusTime=0,this.lastScrollTop=0,this.lastScrollLeft=0,this.chromeScrollHack=-1,this.pendingIOSKey=void 0,this.lastSelectionOrigin=null,this.lastSelectionTime=0,this.lastEscPress=0,this.lastContextMenu=0,this.scrollHandlers=[],this.registeredEvents=[],this.customHandlers=[],this.composing=-1,this.compositionFirstChange=null,this.compositionEndedAt=0,this.mouseSelection=null;for(let t in he){let 
i=he[t];e.contentDOM.addEventListener(t,s=>{!_o(e,s)||this.ignoreDuringComposition(s)||t=="keydown"&&this.keydown(e,s)||(this.mustFlushObserver(s)&&e.observer.forceFlush(),this.runCustomHandlers(t,e,s)?s.preventDefault():i(e,s))},rr[t]),this.registeredEvents.push(t)}M.chrome&&M.chrome_version==102&&e.scrollDOM.addEventListener("wheel",()=>{this.chromeScrollHack<0?e.contentDOM.style.pointerEvents="none":window.clearTimeout(this.chromeScrollHack),this.chromeScrollHack=setTimeout(()=>{this.chromeScrollHack=-1,e.contentDOM.style.pointerEvents=""},100)},{passive:!0}),this.notifiedFocused=e.hasFocus,M.safari&&e.contentDOM.addEventListener("input",()=>null)}setSelectionOrigin(e){this.lastSelectionOrigin=e,this.lastSelectionTime=Date.now()}ensureHandlers(e,t){var i;let s;this.customHandlers=[];for(let r of t)if(s=(i=r.update(e).spec)===null||i===void 0?void 0:i.domEventHandlers){this.customHandlers.push({plugin:r.value,handlers:s});for(let o in s)this.registeredEvents.indexOf(o)<0&&o!="scroll"&&(this.registeredEvents.push(o),e.contentDOM.addEventListener(o,l=>{_o(e,l)&&this.runCustomHandlers(o,e,l)&&l.preventDefault()}))}}runCustomHandlers(e,t,i){for(let s of this.customHandlers){let r=s.handlers[e];if(r)try{if(r.call(s.plugin,i,t)||i.defaultPrevented)return!0}catch(o){He(t.state,o)}}return!1}runScrollHandlers(e,t){this.lastScrollTop=e.scrollDOM.scrollTop,this.lastScrollLeft=e.scrollDOM.scrollLeft;for(let i of this.customHandlers){let s=i.handlers.scroll;if(s)try{s.call(i.plugin,t,e)}catch(r){He(e.state,r)}}}keydown(e,t){if(this.lastKeyCode=t.keyCode,this.lastKeyTime=Date.now(),t.keyCode==9&&Date.now()s.keyCode==t.keyCode))&&!t.ctrlKey||ou.indexOf(t.key)>-1&&t.ctrlKey&&!t.shiftKey)?(this.pendingIOSKey=i||t,setTimeout(()=>this.flushIOSKey(e),250),!0):!1}flushIOSKey(e){let t=this.pendingIOSKey;return t?(this.pendingIOSKey=void 0,Qt(e.contentDOM,t.key,t.keyCode)):!1}ignoreDuringComposition(e){return/^key/.test(e.type)?this.composing>0?!0:M.safari&&!M.ios&&Date.now()-this.compositionEndedAt<100?(this.compositionEndedAt=0,!0):!1:!1}mustFlushObserver(e){return e.type=="keydown"&&e.keyCode!=229}startMouseSelection(e){this.mouseSelection&&this.mouseSelection.destroy(),this.mouseSelection=e}update(e){this.mouseSelection&&this.mouseSelection.update(e),e.transactions.length&&(this.lastKeyCode=this.lastSelectionTime=0)}destroy(){this.mouseSelection&&this.mouseSelection.destroy()}}const fh=[{key:"Backspace",keyCode:8,inputType:"deleteContentBackward"},{key:"Enter",keyCode:13,inputType:"insertParagraph"},{key:"Delete",keyCode:46,inputType:"deleteContentForward"}],ou="dthko",uh=[16,17,18,20,91,92,224,225];class lu{constructor(e,t,i,s){this.view=e,this.style=i,this.mustSelect=s,this.lastEvent=t;let r=e.contentDOM.ownerDocument;r.addEventListener("mousemove",this.move=this.move.bind(this)),r.addEventListener("mouseup",this.up=this.up.bind(this)),this.extend=t.shiftKey,this.multiple=e.state.facet(_.allowMultipleSelections)&&au(e,t),this.dragMove=hu(e,t),this.dragging=cu(e,t)&&gh(t)==1?null:!1,this.dragging===!1&&(t.preventDefault(),this.select(t))}move(e){if(e.buttons==0)return this.destroy();this.dragging===!1&&this.select(this.lastEvent=e)}up(e){this.dragging==null&&this.select(this.lastEvent),this.dragging||e.preventDefault(),this.destroy()}destroy(){let e=this.view.contentDOM.ownerDocument;e.removeEventListener("mousemove",this.move),e.removeEventListener("mouseup",this.up),this.view.inputState.mouseSelection=null}select(e){let 
t=this.style.get(e,this.extend,this.multiple);(this.mustSelect||!t.eq(this.view.state.selection)||t.main.assoc!=this.view.state.selection.main.assoc)&&this.view.dispatch({selection:t,userEvent:"select.pointer",scrollIntoView:!0}),this.mustSelect=!1}update(e){e.docChanged&&this.dragging&&(this.dragging=this.dragging.map(e.changes)),this.style.update(e)&&setTimeout(()=>this.select(this.lastEvent),20)}}function au(n,e){let t=n.state.facet(Ga);return t.length?t[0](e):M.mac?e.metaKey:e.ctrlKey}function hu(n,e){let t=n.state.facet(Ja);return t.length?t[0](e):M.mac?!e.altKey:!e.ctrlKey}function cu(n,e){let{main:t}=n.state.selection;if(t.empty)return!1;let i=Mn(n.root);if(!i||i.rangeCount==0)return!0;let s=i.getRangeAt(0).getClientRects();for(let r=0;r=e.clientX&&o.top<=e.clientY&&o.bottom>=e.clientY)return!0}return!1}function _o(n,e){if(!e.bubbles)return!0;if(e.defaultPrevented)return!1;for(let t=e.target,i;t!=n.contentDOM;t=t.parentNode)if(!t||t.nodeType==11||(i=K.get(t))&&i.ignoreEvent(e))return!1;return!0}const he=Object.create(null),rr=Object.create(null),dh=M.ie&&M.ie_version<15||M.ios&&M.webkit_version<604;function fu(n){let e=n.dom.parentNode;if(!e)return;let t=e.appendChild(document.createElement("textarea"));t.style.cssText="position: fixed; left: -10000px; top: 10px",t.focus(),setTimeout(()=>{n.focus(),t.remove(),ph(n,t.value)},50)}function ph(n,e){let{state:t}=n,i,s=1,r=t.toText(e),o=r.lines==t.selection.ranges.length;if(or!=null&&t.selection.ranges.every(a=>a.empty)&&or==r.toString()){let a=-1;i=t.changeByRange(h=>{let c=t.doc.lineAt(h.from);if(c.from==a)return{range:h};a=c.from;let f=t.toText((o?r.line(s++).text:e)+t.lineBreak);return{changes:{from:c.from,insert:f},range:k.cursor(h.from+f.length)}})}else o?i=t.changeByRange(a=>{let h=r.line(s++);return{changes:{from:a.from,to:a.to,insert:h.text},range:k.cursor(a.from+h.length)}}):i=t.replaceSelection(r);n.dispatch(i,{userEvent:"input.paste",scrollIntoView:!0})}he.keydown=(n,e)=>{n.inputState.setSelectionOrigin("select"),e.keyCode==27?n.inputState.lastEscPress=Date.now():uh.indexOf(e.keyCode)<0&&(n.inputState.lastEscPress=0)};he.touchstart=(n,e)=>{n.inputState.lastTouchTime=Date.now(),n.inputState.setSelectionOrigin("select.pointer")};he.touchmove=n=>{n.inputState.setSelectionOrigin("select.pointer")};rr.touchstart=rr.touchmove={passive:!0};he.mousedown=(n,e)=>{if(n.observer.flush(),n.inputState.lastTouchTime>Date.now()-2e3)return;let t=null;for(let i of n.state.facet(Ya))if(t=i(n,e),t)break;if(!t&&e.button==0&&(t=pu(n,e)),t){let i=n.root.activeElement!=n.contentDOM;i&&n.observer.ignore(()=>La(n.contentDOM)),n.inputState.startMouseSelection(new lu(n,e,t,i))}};function No(n,e,t,i){if(i==1)return k.cursor(e,t);if(i==2)return Zf(n.state,e,t);{let s=ke.find(n.docView,e),r=n.state.doc.lineAt(s?s.posAtEnd:e),o=s?s.posAtStart:r.from,l=s?s.posAtEnd:r.to;return ln>=e.top&&n<=e.bottom,Vo=(n,e,t)=>mh(e,t)&&n>=t.left&&n<=t.right;function uu(n,e,t,i){let s=ke.find(n.docView,e);if(!s)return 1;let r=e-s.posAtStart;if(r==0)return 1;if(r==s.length)return-1;let o=s.coordsAt(r,-1);if(o&&Vo(t,i,o))return-1;let l=s.coordsAt(r,1);return l&&Vo(t,i,l)?1:o&&mh(i,o)?-1:1}function Fo(n,e){let t=n.posAtCoords({x:e.clientX,y:e.clientY},!1);return{pos:t,bias:uu(n,t,e.clientX,e.clientY)}}const du=M.ie&&M.ie_version<=11;let Ho=null,Wo=0,zo=0;function gh(n){if(!du)return n.detail;let e=Ho,t=zo;return Ho=n,zo=Date.now(),Wo=!e||t>Date.now()-400&&Math.abs(e.clientX-n.clientX)<2&&Math.abs(e.clientY-n.clientY)<2?(Wo+1)%3:1}function pu(n,e){let 
t=Fo(n,e),i=gh(e),s=n.state.selection,r=t,o=e;return{update(l){l.docChanged&&(t.pos=l.changes.mapPos(t.pos),s=s.map(l.changes),o=null)},get(l,a,h){let c;o&&l.clientX==o.clientX&&l.clientY==o.clientY?c=r:(c=r=Fo(n,l),o=l);let f=No(n,c.pos,c.bias,i);if(t.pos!=c.pos&&!a){let u=No(n,t.pos,t.bias,i),d=Math.min(u.from,f.from),p=Math.max(u.to,f.to);f=d1&&s.ranges.some(u=>u.eq(f))?mu(s,f):h?s.addRange(f):k.create([f])}}}function mu(n,e){for(let t=0;;t++)if(n.ranges[t].eq(e))return k.create(n.ranges.slice(0,t).concat(n.ranges.slice(t+1)),n.mainIndex==t?0:n.mainIndex-(n.mainIndex>t?1:0))}he.dragstart=(n,e)=>{let{selection:{main:t}}=n.state,{mouseSelection:i}=n.inputState;i&&(i.dragging=t),e.dataTransfer&&(e.dataTransfer.setData("Text",n.state.sliceDoc(t.from,t.to)),e.dataTransfer.effectAllowed="copyMove")};function qo(n,e,t,i){if(!t)return;let s=n.posAtCoords({x:e.clientX,y:e.clientY},!1);e.preventDefault();let{mouseSelection:r}=n.inputState,o=i&&r&&r.dragging&&r.dragMove?{from:r.dragging.from,to:r.dragging.to}:null,l={from:s,insert:t},a=n.state.changes(o?[o,l]:l);n.focus(),n.dispatch({changes:a,selection:{anchor:a.mapPos(s,-1),head:a.mapPos(s,1)},userEvent:o?"move.drop":"input.drop"})}he.drop=(n,e)=>{if(!e.dataTransfer)return;if(n.state.readOnly)return e.preventDefault();let t=e.dataTransfer.files;if(t&&t.length){e.preventDefault();let i=Array(t.length),s=0,r=()=>{++s==t.length&&qo(n,e,i.filter(o=>o!=null).join(n.state.lineBreak),!1)};for(let o=0;o{/[\x00-\x08\x0e-\x1f]{2}/.test(l.result)||(i[o]=l.result),r()},l.readAsText(t[o])}}else qo(n,e,e.dataTransfer.getData("Text"),!0)};he.paste=(n,e)=>{if(n.state.readOnly)return e.preventDefault();n.observer.flush();let t=dh?null:e.clipboardData;t?(ph(n,t.getData("text/plain")),e.preventDefault()):fu(n)};function gu(n,e){let t=n.dom.parentNode;if(!t)return;let i=t.appendChild(document.createElement("textarea"));i.style.cssText="position: fixed; left: -10000px; top: 10px",i.value=e,i.focus(),i.selectionEnd=e.length,i.selectionStart=0,setTimeout(()=>{i.remove(),n.focus()},50)}function bu(n){let e=[],t=[],i=!1;for(let s of n.selection.ranges)s.empty||(e.push(n.sliceDoc(s.from,s.to)),t.push(s));if(!e.length){let s=-1;for(let{from:r}of n.selection.ranges){let o=n.doc.lineAt(r);o.number>s&&(e.push(o.text),t.push({from:o.from,to:Math.min(n.doc.length,o.to+1)})),s=o.number}i=!0}return{text:e.join(n.lineBreak),ranges:t,linewise:i}}let or=null;he.copy=he.cut=(n,e)=>{let{text:t,ranges:i,linewise:s}=bu(n.state);if(!t&&!s)return;or=s?t:null;let r=dh?null:e.clipboardData;r?(e.preventDefault(),r.clearData(),r.setData("text/plain",t)):gu(n,t),e.type=="cut"&&!n.state.readOnly&&n.dispatch({changes:i,scrollIntoView:!0,userEvent:"delete.cut"})};function 
bh(n){setTimeout(()=>{n.hasFocus!=n.inputState.notifiedFocused&&n.update([])},10)}he.focus=n=>{n.inputState.lastFocusTime=Date.now(),!n.scrollDOM.scrollTop&&(n.inputState.lastScrollTop||n.inputState.lastScrollLeft)&&(n.scrollDOM.scrollTop=n.inputState.lastScrollTop,n.scrollDOM.scrollLeft=n.inputState.lastScrollLeft),bh(n)};he.blur=n=>{n.observer.clearSelectionRange(),bh(n)};he.compositionstart=he.compositionupdate=n=>{n.inputState.compositionFirstChange==null&&(n.inputState.compositionFirstChange=!0),n.inputState.composing<0&&(n.inputState.composing=0)};he.compositionend=n=>{n.inputState.composing=-1,n.inputState.compositionEndedAt=Date.now(),n.inputState.compositionFirstChange=null,M.chrome&&M.android&&n.observer.flushSoon(),setTimeout(()=>{n.inputState.composing<0&&n.docView.compositionDeco.size&&n.update([])},50)};he.contextmenu=n=>{n.inputState.lastContextMenu=Date.now()};he.beforeinput=(n,e)=>{var t;let i;if(M.chrome&&M.android&&(i=fh.find(s=>s.inputType==e.inputType))&&(n.observer.delayAndroidKey(i.key,i.keyCode),i.key=="Backspace"||i.key=="Delete")){let s=((t=window.visualViewport)===null||t===void 0?void 0:t.height)||0;setTimeout(()=>{var r;(((r=window.visualViewport)===null||r===void 0?void 0:r.height)||0)>s+10&&n.hasFocus&&(n.contentDOM.blur(),n.focus())},100)}};const jo=["pre-wrap","normal","pre-line","break-spaces"];class yu{constructor(){this.doc=N.empty,this.lineWrapping=!1,this.heightSamples={},this.lineHeight=14,this.charWidth=7,this.lineLength=30,this.heightChanged=!1}heightForGap(e,t){let i=this.doc.lineAt(t).number-this.doc.lineAt(e).number+1;return this.lineWrapping&&(i+=Math.ceil((t-e-i*this.lineLength*.5)/this.lineLength)),this.lineHeight*i}heightForLine(e){return this.lineWrapping?(1+Math.max(0,Math.ceil((e-this.lineLength)/(this.lineLength-5))))*this.lineHeight:this.lineHeight}setDoc(e){return this.doc=e,this}mustRefreshForWrapping(e){return jo.indexOf(e)>-1!=this.lineWrapping}mustRefreshForHeights(e){let t=!1;for(let i=0;i-1,l=Math.round(t)!=Math.round(this.lineHeight)||this.lineWrapping!=o;if(this.lineWrapping=o,this.lineHeight=t,this.charWidth=i,this.lineLength=s,l){this.heightSamples={};for(let a=0;a0}set outdated(e){this.flags=(e?2:0)|this.flags&-3}setHeight(e,t){this.height!=t&&(Math.abs(this.height-t)>yn&&(e.heightChanged=!0),this.height=t)}replace(e,t,i){return ve.of(i)}decomposeLeft(e,t){t.push(this)}decomposeRight(e,t){t.push(this)}applyChanges(e,t,i,s){let r=this;for(let o=s.length-1;o>=0;o--){let{fromA:l,toA:a,fromB:h,toB:c}=s[o],f=r.lineAt(l,j.ByPosNoHeight,t,0,0),u=f.to>=a?f:r.lineAt(a,j.ByPosNoHeight,t,0,0);for(c+=u.to-a,a=u.to;o>0&&f.from<=s[o-1].toA;)l=s[o-1].fromA,h=s[o-1].fromB,o--,lr*2){let l=e[t-1];l.break?e.splice(--t,1,l.left,null,l.right):e.splice(--t,1,l.left,l.right),i+=1+l.break,s-=l.size}else if(r>s*2){let l=e[i];l.break?e.splice(i,1,l.left,null,l.right):e.splice(i,1,l.left,l.right),i+=2+l.break,r-=l.size}else break;else if(s=r&&o(this.blockAt(0,i,s,r))}updateHeight(e,t=0,i=!1,s){return s&&s.from<=t&&s.more&&this.setHeight(e,s.heights[s.index++]),this.outdated=!1,this}toString(){return`block(${this.length})`}}class De extends yh{constructor(e,t){super(e,t,z.Text),this.collapsed=0,this.widgetHeight=0}replace(e,t,i){let s=i[0];return i.length==1&&(s instanceof De||s instanceof fe&&s.flags&4)&&Math.abs(this.length-s.length)<10?(s instanceof fe?s=new De(s.length,this.height):s.height=this.height,this.outdated||(s.outdated=!1),s):ve.of(i)}updateHeight(e,t=0,i=!1,s){return 
s&&s.from<=t&&s.more?this.setHeight(e,s.heights[s.index++]):(i||this.outdated)&&this.setHeight(e,Math.max(this.widgetHeight,e.heightForLine(this.length-this.collapsed))),this.outdated=!1,this}toString(){return`line(${this.length}${this.collapsed?-this.collapsed:""}${this.widgetHeight?":"+this.widgetHeight:""})`}}class fe extends ve{constructor(e){super(e,0)}lines(e,t){let i=e.lineAt(t).number,s=e.lineAt(t+this.length).number;return{firstLine:i,lastLine:s,lineHeight:this.height/(s-i+1)}}blockAt(e,t,i,s){let{firstLine:r,lastLine:o,lineHeight:l}=this.lines(t,s),a=Math.max(0,Math.min(o-r,Math.floor((e-i)/l))),{from:h,length:c}=t.line(r+a);return new ut(h,c,i+l*a,l,z.Text)}lineAt(e,t,i,s,r){if(t==j.ByHeight)return this.blockAt(e,i,s,r);if(t==j.ByPosNoHeight){let{from:f,to:u}=i.lineAt(e);return new ut(f,u-f,0,0,z.Text)}let{firstLine:o,lineHeight:l}=this.lines(i,r),{from:a,length:h,number:c}=i.lineAt(e);return new ut(a,h,s+l*(c-o),l,z.Text)}forEachLine(e,t,i,s,r,o){let{firstLine:l,lineHeight:a}=this.lines(i,r);for(let h=Math.max(e,r),c=Math.min(r+this.length,t);h<=c;){let f=i.lineAt(h);h==e&&(s+=a*(f.number-l)),o(new ut(f.from,f.length,s,a,z.Text)),s+=a,h=f.to+1}}replace(e,t,i){let s=this.length-t;if(s>0){let r=i[i.length-1];r instanceof fe?i[i.length-1]=new fe(r.length+s):i.push(null,new fe(s-1))}if(e>0){let r=i[0];r instanceof fe?i[0]=new fe(e+r.length):i.unshift(new fe(e-1),null)}return ve.of(i)}decomposeLeft(e,t){t.push(new fe(e-1),null)}decomposeRight(e,t){t.push(null,new fe(this.length-e-1))}updateHeight(e,t=0,i=!1,s){let r=t+this.length;if(s&&s.from<=t+this.length&&s.more){let o=[],l=Math.max(t,s.from),a=-1,h=e.heightChanged;for(s.from>t&&o.push(new fe(s.from-t-1).updateHeight(e,t));l<=r&&s.more;){let f=e.doc.lineAt(l).length;o.length&&o.push(null);let u=s.heights[s.index++];a==-1?a=u:Math.abs(u-a)>=yn&&(a=-2);let d=new De(f,u);d.outdated=!1,o.push(d),l+=f+1}l<=r&&o.push(null,new fe(r-l).updateHeight(e,l));let c=ve.of(o);return e.heightChanged=h||a<0||Math.abs(c.height-this.height)>=yn||Math.abs(a-this.lines(e.doc,t).lineHeight)>=yn,c}else(i||this.outdated)&&(this.setHeight(e,e.heightForGap(t,t+this.length)),this.outdated=!1);return this}toString(){return`gap(${this.length})`}}class ku extends ve{constructor(e,t,i){super(e.length+t+i.length,e.height+i.height,t|(e.outdated||i.outdated?2:0)),this.left=e,this.right=i,this.size=e.size+i.size}get break(){return this.flags&1}blockAt(e,t,i,s){let r=i+this.left.height;return el))return h;let c=t==j.ByPosNoHeight?j.ByPosNoHeight:j.ByPos;return a?h.join(this.right.lineAt(l,c,i,o,l)):this.left.lineAt(l,c,i,s,r).join(h)}forEachLine(e,t,i,s,r,o){let l=s+this.left.height,a=r+this.left.length+this.break;if(this.break)e=a&&this.right.forEachLine(e,t,i,l,a,o);else{let h=this.lineAt(a,j.ByPos,i,s,r);e=e&&h.from<=t&&o(h),t>h.to&&this.right.forEachLine(h.to+1,t,i,l,a,o)}}replace(e,t,i){let s=this.left.length+this.break;if(tthis.left.length)return this.balanced(this.left,this.right.replace(e-s,t-s,i));let r=[];e>0&&this.decomposeLeft(e,r);let o=r.length;for(let l of i)r.push(l);if(e>0&&Ko(r,o-1),t=i&&t.push(null)),e>i&&this.right.decomposeLeft(e-i,t)}decomposeRight(e,t){let i=this.left.length,s=i+this.break;if(e>=s)return 
this.right.decomposeRight(e-s,t);e2*t.size||t.size>2*e.size?ve.of(this.break?[e,null,t]:[e,t]):(this.left=e,this.right=t,this.height=e.height+t.height,this.outdated=e.outdated||t.outdated,this.size=e.size+t.size,this.length=e.length+this.break+t.length,this)}updateHeight(e,t=0,i=!1,s){let{left:r,right:o}=this,l=t+r.length+this.break,a=null;return s&&s.from<=t+r.length&&s.more?a=r=r.updateHeight(e,t,i,s):r.updateHeight(e,t,i),s&&s.from<=l+o.length&&s.more?a=o=o.updateHeight(e,l,i,s):o.updateHeight(e,l,i),a?this.balanced(r,o):(this.height=this.left.height+this.right.height,this.outdated=!1,this)}toString(){return this.left+(this.break?" ":"-")+this.right}}function Ko(n,e){let t,i;n[e]==null&&(t=n[e-1])instanceof fe&&(i=n[e+1])instanceof fe&&n.splice(e-1,3,new fe(t.length+1+i.length))}const vu=5;class Ir{constructor(e,t){this.pos=e,this.oracle=t,this.nodes=[],this.lineStart=-1,this.lineEnd=-1,this.covering=null,this.writtenTo=e}get isCovered(){return this.covering&&this.nodes[this.nodes.length-1]==this.covering}span(e,t){if(this.lineStart>-1){let i=Math.min(t,this.lineEnd),s=this.nodes[this.nodes.length-1];s instanceof De?s.length+=i-this.pos:(i>this.pos||!this.isCovered)&&this.nodes.push(new De(i-this.pos,-1)),this.writtenTo=i,t>i&&(this.nodes.push(null),this.writtenTo++,this.lineStart=-1)}this.pos=t}point(e,t,i){if(e=vu)&&this.addLineDeco(s,r)}else t>e&&this.span(e,t);this.lineEnd>-1&&this.lineEnd-1)return;let{from:e,to:t}=this.oracle.doc.lineAt(this.pos);this.lineStart=e,this.lineEnd=t,this.writtenToe&&this.nodes.push(new De(this.pos-e,-1)),this.writtenTo=this.pos}blankContent(e,t){let i=new fe(t-e);return this.oracle.doc.lineAt(e).to==t&&(i.flags|=4),i}ensureLine(){this.enterLine();let e=this.nodes.length?this.nodes[this.nodes.length-1]:null;if(e instanceof De)return e;let t=new De(0,-1);return this.nodes.push(t),t}addBlock(e){this.enterLine(),e.type==z.WidgetAfter&&!this.isCovered&&this.ensureLine(),this.nodes.push(e),this.writtenTo=this.pos=this.pos+e.length,e.type!=z.WidgetBefore&&(this.covering=e)}addLineDeco(e,t){let i=this.ensureLine();i.length+=t,i.collapsed+=t,i.widgetHeight=Math.max(i.widgetHeight,e),this.writtenTo=this.pos=this.pos+t}finish(e){let t=this.nodes.length==0?null:this.nodes[this.nodes.length-1];this.lineStart>-1&&!(t instanceof De)&&!this.isCovered?this.nodes.push(new De(0,-1)):(this.writtenToc.clientHeight||c.scrollWidth>c.clientWidth)&&f.overflow!="visible"){let u=c.getBoundingClientRect();r=Math.max(r,u.left),o=Math.min(o,u.right),l=Math.max(l,u.top),a=h==n.parentNode?u.bottom:Math.min(a,u.bottom)}h=f.position=="absolute"||f.position=="fixed"?c.offsetParent:c.parentNode}else if(h.nodeType==11)h=h.host;else break;return{left:r-t.left,right:Math.max(r,o)-t.left,top:l-(t.top+e),bottom:Math.max(l,a)-(t.top+e)}}function Au(n,e){let t=n.getBoundingClientRect();return{left:0,right:t.right-t.left,top:e,bottom:t.bottom-(t.top+e)}}class cs{constructor(e,t,i){this.from=e,this.to=t,this.size=i}static same(e,t){if(e.length!=t.length)return!1;for(let i=0;itypeof t!="function"),this.heightMap=ve.empty().applyChanges(this.stateDeco,N.empty,this.heightOracle.setDoc(e.doc),[new $e(0,0,0,e.doc.length)]),this.viewport=this.getViewport(0,null),this.updateViewportLines(),this.updateForViewport(),this.lineGaps=this.ensureLineGaps([]),this.lineGapDeco=E.set(this.lineGaps.map(t=>t.draw(!1))),this.computeVisibleRanges()}updateForViewport(){let e=[this.viewport],{main:t}=this.state.selection;for(let i=0;i<=1;i++){let 
s=i?t.head:t.anchor;if(!e.some(({from:r,to:o})=>s>=r&&s<=o)){let{from:r,to:o}=this.lineBlockAt(s);e.push(new $i(r,o))}}this.viewports=e.sort((i,s)=>i.from-s.from),this.scaler=this.heightMap.height<=7e6?Go:new Ou(this.heightOracle.doc,this.heightMap,this.viewports)}updateViewportLines(){this.viewportLines=[],this.heightMap.forEachLine(this.viewport.from,this.viewport.to,this.state.doc,0,0,e=>{this.viewportLines.push(this.scaler.scale==1?e:xi(e,this.scaler))})}update(e,t=null){this.state=e.state;let i=this.stateDeco;this.stateDeco=this.state.facet(Ii).filter(h=>typeof h!="function");let s=e.changedRanges,r=$e.extendWithRanges(s,xu(i,this.stateDeco,e?e.changes:ne.empty(this.state.doc.length))),o=this.heightMap.height;this.heightMap=this.heightMap.applyChanges(this.stateDeco,e.startState.doc,this.heightOracle.setDoc(this.state.doc),r),this.heightMap.height!=o&&(e.flags|=2);let l=r.length?this.mapViewport(this.viewport,e.changes):this.viewport;(t&&(t.range.headl.to)||!this.viewportIsAppropriate(l))&&(l=this.getViewport(0,t));let a=!e.changes.empty||e.flags&2||l.from!=this.viewport.from||l.to!=this.viewport.to;this.viewport=l,this.updateForViewport(),a&&this.updateViewportLines(),(this.lineGaps.length||this.viewport.to-this.viewport.from>2e3<<1)&&this.updateLineGaps(this.ensureLineGaps(this.mapLineGaps(this.lineGaps,e.changes))),e.flags|=this.computeVisibleRanges(),t&&(this.scrollTarget=t),!this.mustEnforceCursorAssoc&&e.selectionSet&&e.view.lineWrapping&&e.state.selection.main.empty&&e.state.selection.main.assoc&&!e.state.facet($a)&&(this.mustEnforceCursorAssoc=!0)}measure(e){let t=e.contentDOM,i=window.getComputedStyle(t),s=this.heightOracle,r=i.whiteSpace;this.defaultTextDirection=i.direction=="rtl"?Q.RTL:Q.LTR;let o=this.heightOracle.mustRefreshForWrapping(r),l=o||this.mustMeasureContent||this.contentDOMHeight!=t.clientHeight;this.contentDOMHeight=t.clientHeight,this.mustMeasureContent=!1;let a=0,h=0,c=parseInt(i.paddingTop)||0,f=parseInt(i.paddingBottom)||0;(this.paddingTop!=c||this.paddingBottom!=f)&&(this.paddingTop=c,this.paddingBottom=f,a|=10),this.editorWidth!=e.scrollDOM.clientWidth&&(s.lineWrapping&&(l=!0),this.editorWidth=e.scrollDOM.clientWidth,a|=8);let u=(this.printing?Au:Cu)(t,this.paddingTop),d=u.top-this.pixelViewport.top,p=u.bottom-this.pixelViewport.bottom;this.pixelViewport=u;let g=this.pixelViewport.bottom>this.pixelViewport.top&&this.pixelViewport.right>this.pixelViewport.left;if(g!=this.inView&&(this.inView=g,g&&(l=!0)),!this.inView&&!this.scrollTarget)return 0;let b=t.clientWidth;if((this.contentDOMWidth!=b||this.editorHeight!=e.scrollDOM.clientHeight)&&(this.contentDOMWidth=b,this.editorHeight=e.scrollDOM.clientHeight,a|=8),l){let y=e.docView.measureVisibleLineHeights(this.viewport);if(s.mustRefreshForHeights(y)&&(o=!0),o||s.lineWrapping&&Math.abs(b-this.contentDOMWidth)>s.charWidth){let{lineHeight:S,charWidth:C}=e.docView.measureTextSize();o=S>0&&s.refresh(r,S,C,b/C,y),o&&(e.docView.minWidth=0,a|=8)}d>0&&p>0?h=Math.max(d,p):d<0&&p<0&&(h=Math.min(d,p)),s.heightChanged=!1;for(let S of this.viewports){let C=S.from==this.viewport.from?y:e.docView.measureVisibleLineHeights(S);this.heightMap=o?ve.empty().applyChanges(this.stateDeco,N.empty,this.heightOracle,[new $e(0,0,0,e.state.doc.length)]):this.heightMap.updateHeight(s,0,o,new wu(S.from,C))}s.heightChanged&&(a|=2)}let w=!this.viewportIsAppropriate(this.viewport,h)||this.scrollTarget&&(this.scrollTarget.range.headthis.viewport.to);return 
w&&(this.viewport=this.getViewport(h,this.scrollTarget)),this.updateForViewport(),(a&2||w)&&this.updateViewportLines(),(this.lineGaps.length||this.viewport.to-this.viewport.from>2e3<<1)&&this.updateLineGaps(this.ensureLineGaps(o?[]:this.lineGaps,e)),a|=this.computeVisibleRanges(),this.mustEnforceCursorAssoc&&(this.mustEnforceCursorAssoc=!1,e.docView.enforceCursorAssoc()),a}get visibleTop(){return this.scaler.fromDOM(this.pixelViewport.top)}get visibleBottom(){return this.scaler.fromDOM(this.pixelViewport.bottom)}getViewport(e,t){let i=.5-Math.max(-.5,Math.min(.5,e/1e3/2)),s=this.heightMap,r=this.state.doc,{visibleTop:o,visibleBottom:l}=this,a=new $i(s.lineAt(o-i*1e3,j.ByHeight,r,0,0).from,s.lineAt(l+(1-i)*1e3,j.ByHeight,r,0,0).to);if(t){let{head:h}=t.range;if(ha.to){let c=Math.min(this.editorHeight,this.pixelViewport.bottom-this.pixelViewport.top),f=s.lineAt(h,j.ByPos,r,0,0),u;t.y=="center"?u=(f.top+f.bottom)/2-c/2:t.y=="start"||t.y=="nearest"&&h=l+Math.max(10,Math.min(i,250)))&&s>o-2*1e3&&r>1,o=s<<1;if(this.defaultTextDirection!=Q.LTR&&!i)return[];let l=[],a=(h,c,f,u)=>{if(c-hh&&bb.from>=f.from&&b.to<=f.to&&Math.abs(b.from-h)b.fromw));if(!g){if(cb.from<=c&&b.to>=c)){let b=t.moveToLineBoundary(k.cursor(c),!1,!0).head;b>h&&(c=b)}g=new cs(h,c,this.gapSize(f,h,c,u))}l.push(g)};for(let h of this.viewportLines){if(h.lengthh.from&&a(h.from,u,h,c),dt.draw(this.heightOracle.lineWrapping))))}computeVisibleRanges(){let e=this.stateDeco;this.lineGaps.length&&(e=e.concat(this.lineGapDeco));let t=[];H.spans(e,this.viewport.from,this.viewport.to,{span(s,r){t.push({from:s,to:r})},point(){}},20);let i=t.length!=this.visibleRanges.length||this.visibleRanges.some((s,r)=>s.from!=t[r].from||s.to!=t[r].to);return this.visibleRanges=t,i?4:0}lineBlockAt(e){return e>=this.viewport.from&&e<=this.viewport.to&&this.viewportLines.find(t=>t.from<=e&&t.to>=e)||xi(this.heightMap.lineAt(e,j.ByPos,this.state.doc,0,0),this.scaler)}lineBlockAtHeight(e){return xi(this.heightMap.lineAt(this.scaler.fromDOM(e),j.ByHeight,this.state.doc,0,0),this.scaler)}elementAtHeight(e){return xi(this.heightMap.blockAt(this.scaler.fromDOM(e),this.state.doc,0,0),this.scaler)}get docHeight(){return this.scaler.toDOM(this.heightMap.height)}get contentHeight(){return this.docHeight+this.paddingTop+this.paddingBottom}}class $i{constructor(e,t){this.from=e,this.to=t}}function Du(n,e,t){let i=[],s=n,r=0;return H.spans(t,n,e,{span(){},point(o,l){o>s&&(i.push({from:s,to:o}),r+=o-s),s=l}},20),s=1)return e[e.length-1].to;let i=Math.floor(n*t);for(let s=0;;s++){let{from:r,to:o}=e[s],l=o-r;if(i<=l)return r+i;i-=l}}function tn(n,e){let t=0;for(let{from:i,to:s}of n.ranges){if(e<=s){t+=e-i;break}t+=s-i}return t/n.total}function Tu(n,e){for(let t of n)if(e(t))return t}const Go={toDOM(n){return n},fromDOM(n){return n},scale:1};class Ou{constructor(e,t,i){let s=0,r=0,o=0;this.viewports=i.map(({from:l,to:a})=>{let h=t.lineAt(l,j.ByPos,e,0,0).top,c=t.lineAt(a,j.ByPos,e,0,0).bottom;return s+=c-h,{from:l,to:a,top:h,bottom:c,domTop:0,domBottom:0}}),this.scale=(7e6-s)/(t.height-s);for(let l of this.viewports)l.domTop=o+(l.top-r)*this.scale,o=l.domBottom=l.domTop+(l.bottom-l.top),r=l.bottom}toDOM(e){for(let t=0,i=0,s=0;;t++){let r=txi(s,e)):n.type)}const nn=O.define({combine:n=>n.join(" ")}),lr=O.define({combine:n=>n.indexOf(!0)>-1}),ar=mt.newName(),wh=mt.newName(),kh=mt.newName(),vh={"&light":"."+wh,"&dark":"."+kh};function hr(n,e,t){return new mt(e,{finish(i){return/&/.test(i)?i.replace(/&\w*/,s=>{if(s=="&")return n;if(!t||!t[s])throw new RangeError(`Unsupported 
selector: ${s}`);return t[s]}):n+" "+i}})}const Bu=hr("."+ar,{"&.cm-editor":{position:"relative !important",boxSizing:"border-box","&.cm-focused":{outline:"1px dotted #212121"},display:"flex !important",flexDirection:"column"},".cm-scroller":{display:"flex !important",alignItems:"flex-start !important",fontFamily:"monospace",lineHeight:1.4,height:"100%",overflowX:"auto",position:"relative",zIndex:0},".cm-content":{margin:0,flexGrow:2,flexShrink:0,minHeight:"100%",display:"block",whiteSpace:"pre",wordWrap:"normal",boxSizing:"border-box",padding:"4px 0",outline:"none","&[contenteditable=true]":{WebkitUserModify:"read-write-plaintext-only"}},".cm-lineWrapping":{whiteSpace_fallback:"pre-wrap",whiteSpace:"break-spaces",wordBreak:"break-word",overflowWrap:"anywhere",flexShrink:1},"&light .cm-content":{caretColor:"black"},"&dark .cm-content":{caretColor:"white"},".cm-line":{display:"block",padding:"0 2px 0 4px"},".cm-selectionLayer":{zIndex:-1,contain:"size style"},".cm-selectionBackground":{position:"absolute"},"&light .cm-selectionBackground":{background:"#d9d9d9"},"&dark .cm-selectionBackground":{background:"#222"},"&light.cm-focused .cm-selectionBackground":{background:"#d7d4f0"},"&dark.cm-focused .cm-selectionBackground":{background:"#233"},".cm-cursorLayer":{zIndex:100,contain:"size style",pointerEvents:"none"},"&.cm-focused .cm-cursorLayer":{animation:"steps(1) cm-blink 1.2s infinite"},"@keyframes cm-blink":{"0%":{},"50%":{opacity:0},"100%":{}},"@keyframes cm-blink2":{"0%":{},"50%":{opacity:0},"100%":{}},".cm-cursor, .cm-dropCursor":{position:"absolute",borderLeft:"1.2px solid black",marginLeft:"-0.6px",pointerEvents:"none"},".cm-cursor":{display:"none"},"&dark .cm-cursor":{borderLeftColor:"#444"},"&.cm-focused .cm-cursor":{display:"block"},"&light .cm-activeLine":{backgroundColor:"#cceeff44"},"&dark .cm-activeLine":{backgroundColor:"#99eeff33"},"&light .cm-specialChar":{color:"red"},"&dark .cm-specialChar":{color:"#f78"},".cm-gutters":{flexShrink:0,display:"flex",height:"100%",boxSizing:"border-box",left:0,zIndex:200},"&light .cm-gutters":{backgroundColor:"#f5f5f5",color:"#6c6c6c",borderRight:"1px solid #ddd"},"&dark .cm-gutters":{backgroundColor:"#333338",color:"#ccc"},".cm-gutter":{display:"flex !important",flexDirection:"column",flexShrink:0,boxSizing:"border-box",minHeight:"100%",overflow:"hidden"},".cm-gutterElement":{boxSizing:"border-box"},".cm-lineNumbers .cm-gutterElement":{padding:"0 3px 0 5px",minWidth:"20px",textAlign:"right",whiteSpace:"nowrap"},"&light .cm-activeLineGutter":{backgroundColor:"#e2f2ff"},"&dark .cm-activeLineGutter":{backgroundColor:"#222227"},".cm-panels":{boxSizing:"border-box",position:"sticky",left:0,right:0},"&light .cm-panels":{backgroundColor:"#f5f5f5",color:"black"},"&light .cm-panels-top":{borderBottom:"1px solid #ddd"},"&light .cm-panels-bottom":{borderTop:"1px solid #ddd"},"&dark .cm-panels":{backgroundColor:"#333338",color:"white"},".cm-tab":{display:"inline-block",overflow:"hidden",verticalAlign:"bottom"},".cm-widgetBuffer":{verticalAlign:"text-top",height:"1em",width:0,display:"inline"},".cm-placeholder":{color:"#888",display:"inline-block",verticalAlign:"top"},".cm-button":{verticalAlign:"middle",color:"inherit",fontSize:"70%",padding:".2em 1em",borderRadius:"1px"},"&light .cm-button":{backgroundImage:"linear-gradient(#eff1f5, #d9d9df)",border:"1px solid #888","&:active":{backgroundImage:"linear-gradient(#b4b4b4, #d0d3d6)"}},"&dark .cm-button":{backgroundImage:"linear-gradient(#393939, #111)",border:"1px solid 
#888","&:active":{backgroundImage:"linear-gradient(#111, #333)"}},".cm-textfield":{verticalAlign:"middle",color:"inherit",fontSize:"70%",border:"1px solid silver",padding:".2em .5em"},"&light .cm-textfield":{backgroundColor:"white"},"&dark .cm-textfield":{border:"1px solid #555",backgroundColor:"inherit"}},vh);class Pu{constructor(e,t,i,s){this.typeOver=s,this.bounds=null,this.text="";let{impreciseHead:r,impreciseAnchor:o}=e.docView;if(t>-1&&!e.state.readOnly&&(this.bounds=e.docView.domBoundsAround(t,i,0))){let l=r||o?[]:Ru(e),a=new lh(l,e.state);a.readRange(this.bounds.startDOM,this.bounds.endDOM),this.text=a.text,this.newSel=Lu(l,this.bounds.from)}else{let l=e.observer.selectionRange,a=r&&r.node==l.focusNode&&r.offset==l.focusOffset||!ni(e.contentDOM,l.focusNode)?e.state.selection.main.head:e.docView.posFromDOM(l.focusNode,l.focusOffset),h=o&&o.node==l.anchorNode&&o.offset==l.anchorOffset||!ni(e.contentDOM,l.anchorNode)?e.state.selection.main.anchor:e.docView.posFromDOM(l.anchorNode,l.anchorOffset);this.newSel=k.single(h,a)}}}function xh(n,e){let t,{newSel:i}=e,s=n.state.selection.main;if(e.bounds){let{from:r,to:o}=e.bounds,l=s.from,a=null;(n.inputState.lastKeyCode===8&&n.inputState.lastKeyTime>Date.now()-100||M.android&&e.text.length=s.from&&t.to<=s.to&&(t.from!=s.from||t.to!=s.to)&&s.to-s.from-(t.to-t.from)<=4?t={from:s.from,to:s.to,insert:n.state.doc.slice(s.from,t.from).append(t.insert).append(n.state.doc.slice(t.to,s.to))}:(M.mac||M.android)&&t&&t.from==t.to&&t.from==s.head-1&&/^\. ?$/.test(t.insert.toString())?(i&&t.insert.length==2&&(i=k.single(i.main.anchor-1,i.main.head-1)),t={from:s.from,to:s.to,insert:N.of([" "])}):M.chrome&&t&&t.from==t.to&&t.from==s.head&&t.insert.toString()==` - `&&n.lineWrapping&&(i&&(i=k.single(i.main.anchor-1,i.main.head-1)),t={from:s.from,to:s.to,insert:N.of([" "])}),t){let r=n.state;if(M.ios&&n.inputState.flushIOSKey(n)||M.android&&(t.from==s.from&&t.to==s.to&&t.insert.length==1&&t.insert.lines==2&&Qt(n.contentDOM,"Enter",13)||t.from==s.from-1&&t.to==s.to&&t.insert.length==0&&Qt(n.contentDOM,"Backspace",8)||t.from==s.from&&t.to==s.to+1&&t.insert.length==0&&Qt(n.contentDOM,"Delete",46)))return!0;let o=t.insert.toString();if(n.state.facet(Za).some(h=>h(n,t.from,t.to,o)))return!0;n.inputState.composing>=0&&n.inputState.composing++;let l;if(t.from>=s.from&&t.to<=s.to&&t.to-t.from>=(s.to-s.from)/3&&(!i||i.main.empty&&i.main.from==t.from+t.insert.length)&&n.inputState.composing<0){let h=s.fromt.to?r.sliceDoc(t.to,s.to):"";l=r.replaceSelection(n.state.toText(h+t.insert.sliceString(0,void 0,n.state.lineBreak)+c))}else{let h=r.changes(t),c=i&&!r.selection.main.eq(i.main)&&i.main.to<=h.newLength?i.main:void 0;if(r.selection.ranges.length>1&&n.inputState.composing>=0&&t.to<=s.to&&t.to>=s.to-10){let f=n.state.sliceDoc(t.from,t.to),u=ah(n)||n.state.doc.lineAt(s.head),d=s.to-t.to,p=s.to-s.from;l=r.changeByRange(g=>{if(g.from==s.from&&g.to==s.to)return{changes:h,range:c||g.map(h)};let b=g.to-d,w=b-f.length;if(g.to-g.from!=p||n.state.sliceDoc(w,b)!=f||u&&g.to>=u.from&&g.from<=u.to)return{range:g};let y=r.changes({from:w,to:b,insert:t.insert}),S=g.to-s.to;return{changes:y,range:c?k.range(Math.max(0,c.anchor+S),Math.max(0,c.head+S)):g.map(y)}})}else l={changes:h,selection:c&&r.selection.replaceRange(c)}}let a="input.type";return n.composing&&(a+=".compose",n.inputState.compositionFirstChange&&(a+=".start",n.inputState.compositionFirstChange=!1)),n.dispatch(l,{scrollIntoView:!0,userEvent:a}),!0}else if(i&&!i.main.eq(s)){let r=!1,o="select";return 
n.inputState.lastSelectionTime>Date.now()-50&&(n.inputState.lastSelectionOrigin=="select"&&(r=!0),o=n.inputState.lastSelectionOrigin),n.dispatch({selection:i,scrollIntoView:r,userEvent:o}),!0}else return!1}function Eu(n,e,t,i){let s=Math.min(n.length,e.length),r=0;for(;r0&&l>0&&n.charCodeAt(o-1)==e.charCodeAt(l-1);)o--,l--;if(i=="end"){let a=Math.max(0,r-Math.min(o,l));t-=o+a-r}if(o=o?r-t:0;r-=a,l=r+(l-o),o=r}else if(l=l?r-t:0;r-=a,o=r+(o-l),l=r}return{from:r,toA:o,toB:l}}function Ru(n){let e=[];if(n.root.activeElement!=n.contentDOM)return e;let{anchorNode:t,anchorOffset:i,focusNode:s,focusOffset:r}=n.observer.selectionRange;return t&&(e.push(new To(t,i)),(s!=t||r!=i)&&e.push(new To(s,r))),e}function Lu(n,e){if(n.length==0)return null;let t=n[0].pos,i=n.length==2?n[1].pos:t;return t>-1&&i>-1?k.single(t+e,i+e):null}const Iu={childList:!0,characterData:!0,subtree:!0,attributes:!0,characterDataOldValue:!0},fs=M.ie&&M.ie_version<=11;class _u{constructor(e){this.view=e,this.active=!1,this.selectionRange=new Of,this.selectionChanged=!1,this.delayedFlush=-1,this.resizeTimeout=-1,this.queue=[],this.delayedAndroidKey=null,this.flushingAndroidKey=-1,this.lastChange=0,this.scrollTargets=[],this.intersection=null,this.resize=null,this.intersecting=!1,this.gapIntersection=null,this.gaps=[],this.parentCheck=-1,this.dom=e.contentDOM,this.observer=new MutationObserver(t=>{for(let i of t)this.queue.push(i);(M.ie&&M.ie_version<=11||M.ios&&e.composing)&&t.some(i=>i.type=="childList"&&i.removedNodes.length||i.type=="characterData"&&i.oldValue.length>i.target.nodeValue.length)?this.flushSoon():this.flush()}),fs&&(this.onCharData=t=>{this.queue.push({target:t.target,type:"characterData",oldValue:t.prevValue}),this.flushSoon()}),this.onSelectionChange=this.onSelectionChange.bind(this),this.onResize=this.onResize.bind(this),this.onPrint=this.onPrint.bind(this),this.onScroll=this.onScroll.bind(this),typeof ResizeObserver=="function"&&(this.resize=new ResizeObserver(()=>{var t;((t=this.view.docView)===null||t===void 0?void 0:t.lastUpdate){this.parentCheck<0&&(this.parentCheck=setTimeout(this.listenForScroll.bind(this),1e3)),t.length>0&&t[t.length-1].intersectionRatio>0!=this.intersecting&&(this.intersecting=!this.intersecting,this.intersecting!=this.view.inView&&this.onScrollChanged(document.createEvent("Event")))},{}),this.intersection.observe(this.dom),this.gapIntersection=new IntersectionObserver(t=>{t.length>0&&t[t.length-1].intersectionRatio>0&&this.onScrollChanged(document.createEvent("Event"))},{})),this.listenForScroll(),this.readSelectionRange()}onScrollChanged(e){this.view.inputState.runScrollHandlers(this.view,e),this.intersecting&&this.view.measure()}onScroll(e){this.intersecting&&this.flush(!1),this.onScrollChanged(e)}onResize(){this.resizeTimeout<0&&(this.resizeTimeout=setTimeout(()=>{this.resizeTimeout=-1,this.view.requestMeasure()},50))}onPrint(){this.view.viewState.printing=!0,this.view.measure(),setTimeout(()=>{this.view.viewState.printing=!1,this.view.requestMeasure()},500)}updateGaps(e){if(this.gapIntersection&&(e.length!=this.gaps.length||this.gaps.some((t,i)=>t!=e[i]))){this.gapIntersection.disconnect();for(let t of e)this.gapIntersection.observe(t);this.gaps=e}}onSelectionChange(e){let t=this.selectionChanged;if(!this.readSelectionRange()||this.delayedAndroidKey)return;let{view:i}=this,s=this.selectionRange;if(i.state.facet(Un)?i.root.activeElement!=this.dom:!bn(i.dom,s))return;let 
r=s.anchorNode&&i.docView.nearest(s.anchorNode);if(r&&r.ignoreEvent(e)){t||(this.selectionChanged=!1);return}(M.ie&&M.ie_version<=11||M.android&&M.chrome)&&!i.state.selection.main.empty&&s.focusNode&&Dn(s.focusNode,s.focusOffset,s.anchorNode,s.anchorOffset)?this.flushSoon():this.flush(!1)}readSelectionRange(){let{view:e}=this,t=M.safari&&e.root.nodeType==11&&Mf(this.dom.ownerDocument)==this.dom&&Nu(this.view)||Mn(e.root);if(!t||this.selectionRange.eq(t))return!1;let i=bn(this.dom,t);return i&&!this.selectionChanged&&e.inputState.lastFocusTime>Date.now()-200&&e.inputState.lastTouchTime{let r=this.delayedAndroidKey;r&&(this.clearDelayedAndroidKey(),!this.flush()&&r.force&&Qt(this.dom,r.key,r.keyCode))};this.flushingAndroidKey=this.view.win.requestAnimationFrame(s)}(!this.delayedAndroidKey||e=="Enter")&&(this.delayedAndroidKey={key:e,keyCode:t,force:this.lastChange{this.delayedFlush=-1,this.flush()}))}forceFlush(){this.delayedFlush>=0&&(this.view.win.cancelAnimationFrame(this.delayedFlush),this.delayedFlush=-1),this.flush()}processRecords(){let e=this.queue;for(let r of this.observer.takeRecords())e.push(r);e.length&&(this.queue=[]);let t=-1,i=-1,s=!1;for(let r of e){let o=this.readMutation(r);o&&(o.typeOver&&(s=!0),t==-1?{from:t,to:i}=o:(t=Math.min(o.from,t),i=Math.max(o.to,i)))}return{from:t,to:i,typeOver:s}}readChange(){let{from:e,to:t,typeOver:i}=this.processRecords(),s=this.selectionChanged&&bn(this.dom,this.selectionRange);return e<0&&!s?null:(e>-1&&(this.lastChange=Date.now()),this.view.inputState.lastFocusTime=0,this.selectionChanged=!1,new Pu(this.view,e,t,i))}flush(e=!0){if(this.delayedFlush>=0||this.delayedAndroidKey)return!1;e&&this.readSelectionRange();let t=this.readChange();if(!t)return!1;let i=this.view.state,s=xh(this.view,t);return this.view.state==i&&this.view.update([]),s}readMutation(e){let t=this.view.docView.nearest(e.target);if(!t||t.ignoreMutation(e))return null;if(t.markDirty(e.type=="attributes"),e.type=="attributes"&&(t.dirty|=4),e.type=="childList"){let i=Jo(t,e.previousSibling||e.target.previousSibling,-1),s=Jo(t,e.nextSibling||e.target.nextSibling,1);return{from:i?t.posAfter(i):t.posAtStart,to:s?t.posBefore(s):t.posAtEnd,typeOver:!1}}else return e.type=="characterData"?{from:t.posAtStart,to:t.posAtEnd,typeOver:e.target.nodeValue==e.oldValue}:null}setWindow(e){e!=this.win&&(this.removeWindowListeners(this.win),this.win=e,this.addWindowListeners(this.win))}addWindowListeners(e){e.addEventListener("resize",this.onResize),e.addEventListener("beforeprint",this.onPrint),e.addEventListener("scroll",this.onScroll),e.document.addEventListener("selectionchange",this.onSelectionChange)}removeWindowListeners(e){e.removeEventListener("scroll",this.onScroll),e.removeEventListener("resize",this.onResize),e.removeEventListener("beforeprint",this.onPrint),e.document.removeEventListener("selectionchange",this.onSelectionChange)}destroy(){var e,t,i;this.stop(),(e=this.intersection)===null||e===void 0||e.disconnect(),(t=this.gapIntersection)===null||t===void 0||t.disconnect(),(i=this.resize)===null||i===void 0||i.disconnect();for(let s of this.scrollTargets)s.removeEventListener("scroll",this.onScroll);this.removeWindowListeners(this.win),clearTimeout(this.parentCheck),clearTimeout(this.resizeTimeout),this.win.cancelAnimationFrame(this.delayedFlush),this.win.cancelAnimationFrame(this.flushingAndroidKey)}}function Jo(n,e,t){for(;e;){let i=K.get(e);if(i&&i.parent==n)return i;let s=e.parentNode;e=s!=n.dom?s:t>0?e.nextSibling:e.previousSibling}return null}function Nu(n){let 
e=null;function t(a){a.preventDefault(),a.stopImmediatePropagation(),e=a.getTargetRanges()[0]}if(n.contentDOM.addEventListener("beforeinput",t,!0),n.dom.ownerDocument.execCommand("indent"),n.contentDOM.removeEventListener("beforeinput",t,!0),!e)return null;let i=e.startContainer,s=e.startOffset,r=e.endContainer,o=e.endOffset,l=n.docView.domAtPos(n.state.selection.main.anchor);return Dn(l.node,l.offset,r,o)&&([i,s,r,o]=[r,o,i,s]),{anchorNode:i,anchorOffset:s,focusNode:r,focusOffset:o}}class B{constructor(e={}){this.plugins=[],this.pluginMap=new Map,this.editorAttrs={},this.contentAttrs={},this.bidiCache=[],this.destroyed=!1,this.updateState=2,this.measureScheduled=-1,this.measureRequests=[],this.contentDOM=document.createElement("div"),this.scrollDOM=document.createElement("div"),this.scrollDOM.tabIndex=-1,this.scrollDOM.className="cm-scroller",this.scrollDOM.appendChild(this.contentDOM),this.announceDOM=document.createElement("div"),this.announceDOM.style.cssText="position: absolute; top: -10000px",this.announceDOM.setAttribute("aria-live","polite"),this.dom=document.createElement("div"),this.dom.appendChild(this.announceDOM),this.dom.appendChild(this.scrollDOM),this._dispatch=e.dispatch||(t=>this.update([t])),this.dispatch=this.dispatch.bind(this),this._root=e.root||Bf(e.parent)||document,this.viewState=new Uo(e.state||_.create(e)),this.plugins=this.state.facet(ki).map(t=>new ls(t));for(let t of this.plugins)t.update(this);this.observer=new _u(this),this.inputState=new ru(this),this.inputState.ensureHandlers(this,this.plugins),this.docView=new Oo(this),this.mountStyles(),this.updateAttrs(),this.updateState=0,this.requestMeasure(),e.parent&&e.parent.appendChild(this.dom)}get state(){return this.viewState.state}get viewport(){return this.viewState.viewport}get visibleRanges(){return this.viewState.visibleRanges}get inView(){return this.viewState.inView}get composing(){return this.inputState.composing>0}get compositionStarted(){return this.inputState.composing>=0}get root(){return this._root}get win(){return this.dom.ownerDocument.defaultView||window}dispatch(...e){this._dispatch(e.length==1&&e[0]instanceof re?e[0]:this.state.update(...e))}update(e){if(this.updateState!=0)throw new Error("Calls to EditorView.update are not allowed while an update is in progress");let t=!1,i=!1,s,r=this.state;for(let h of e){if(h.startState!=r)throw new RangeError("Trying to update state with a transaction that doesn't start from the previous state.");r=h.state}if(this.destroyed){this.viewState.state=r;return}let o=this.observer.delayedAndroidKey,l=null;if(o?(this.observer.clearDelayedAndroidKey(),l=this.observer.readChange(),(l&&!this.state.doc.eq(r.doc)||!this.state.selection.eq(r.selection))&&(l=null)):this.observer.clear(),r.facet(_.phrases)!=this.state.facet(_.phrases))return this.setState(r);s=Bn.create(this,r,e);let a=this.viewState.scrollTarget;try{this.updateState=2;for(let h of e){if(a&&(a=a.map(h.changes)),h.scrollIntoView){let{main:c}=h.state.selection;a=new On(c.empty?c:k.cursor(c.head,c.head>c.anchor?-1:1))}for(let c of 
h.effects)c.is(Mo)&&(a=c.value)}this.viewState.update(s,a),this.bidiCache=Pn.update(this.bidiCache,s.changes),s.empty||(this.updatePlugins(s),this.inputState.update(s)),t=this.docView.update(s),this.state.facet(vi)!=this.styleModules&&this.mountStyles(),i=this.updateAttrs(),this.showAnnouncements(e),this.docView.updateSelection(t,e.some(h=>h.isUserEvent("select.pointer")))}finally{this.updateState=0}if(s.startState.facet(nn)!=s.state.facet(nn)&&(this.viewState.mustMeasureContent=!0),(t||i||a||this.viewState.mustEnforceCursorAssoc||this.viewState.mustMeasureContent)&&this.requestMeasure(),!s.empty)for(let h of this.state.facet(tr))h(s);l&&!xh(this,l)&&o.force&&Qt(this.contentDOM,o.key,o.keyCode)}setState(e){if(this.updateState!=0)throw new Error("Calls to EditorView.setState are not allowed while an update is in progress");if(this.destroyed){this.viewState.state=e;return}this.updateState=2;let t=this.hasFocus;try{for(let i of this.plugins)i.destroy(this);this.viewState=new Uo(e),this.plugins=e.facet(ki).map(i=>new ls(i)),this.pluginMap.clear();for(let i of this.plugins)i.update(this);this.docView=new Oo(this),this.inputState.ensureHandlers(this,this.plugins),this.mountStyles(),this.updateAttrs(),this.bidiCache=[]}finally{this.updateState=0}t&&this.focus(),this.requestMeasure()}updatePlugins(e){let t=e.startState.facet(ki),i=e.state.facet(ki);if(t!=i){let s=[];for(let r of i){let o=t.indexOf(r);if(o<0)s.push(new ls(r));else{let l=this.plugins[o];l.mustUpdate=e,s.push(l)}}for(let r of this.plugins)r.mustUpdate!=e&&r.destroy(this);this.plugins=s,this.pluginMap.clear(),this.inputState.ensureHandlers(this,this.plugins)}else for(let s of this.plugins)s.mustUpdate=e;for(let s=0;s-1&&cancelAnimationFrame(this.measureScheduled),this.measureScheduled=0,e&&this.observer.forceFlush();let t=null,{scrollHeight:i,scrollTop:s,clientHeight:r}=this.scrollDOM,o=s>i-r-4?i:s;try{for(let l=0;;l++){this.updateState=1;let a=this.viewport,h=this.viewState.lineBlockAtHeight(o),c=this.viewState.measure(this);if(!c&&!this.measureRequests.length&&this.viewState.scrollTarget==null)break;if(l>5){console.warn(this.measureRequests.length?"Measure loop restarted more than 5 times":"Viewport failed to stabilize");break}let f=[];c&4||([this.measureRequests,f]=[f,this.measureRequests]);let u=f.map(b=>{try{return b.read(this)}catch(w){return He(this.state,w),Yo}}),d=Bn.create(this,this.state,[]),p=!1,g=!1;d.flags|=c,t?t.flags|=c:t=d,this.updateState=2,d.empty||(this.updatePlugins(d),this.inputState.update(d),this.updateAttrs(),p=this.docView.update(d));for(let b=0;b1||b<-1)&&(this.scrollDOM.scrollTop+=b,g=!0)}if(p&&this.docView.updateSelection(!0),this.viewport.from==a.from&&this.viewport.to==a.to&&!g&&this.measureRequests.length==0)break}}finally{this.updateState=0,this.measureScheduled=-1}if(t&&!t.empty)for(let l of this.state.facet(tr))l(t)}get themeClasses(){return ar+" "+(this.state.facet(lr)?kh:wh)+" "+this.state.facet(nn)}updateAttrs(){let e=Xo(this,eh,{class:"cm-editor"+(this.hasFocus?" 
cm-focused ":" ")+this.themeClasses}),t={spellcheck:"false",autocorrect:"off",autocapitalize:"off",translate:"no",contenteditable:this.state.facet(Un)?"true":"false",class:"cm-content",style:`${M.tabSize}: ${this.state.tabSize}`,role:"textbox","aria-multiline":"true"};this.state.readOnly&&(t["aria-readonly"]="true"),Xo(this,th,t);let i=this.observer.ignore(()=>{let s=$s(this.contentDOM,this.contentAttrs,t),r=$s(this.dom,this.editorAttrs,e);return s||r});return this.editorAttrs=e,this.contentAttrs=t,i}showAnnouncements(e){let t=!0;for(let i of e)for(let s of i.effects)if(s.is(B.announce)){t&&(this.announceDOM.textContent=""),t=!1;let r=this.announceDOM.appendChild(document.createElement("div"));r.textContent=s.value}}mountStyles(){this.styleModules=this.state.facet(vi),mt.mount(this.root,this.styleModules.concat(Bu).reverse())}readMeasured(){if(this.updateState==2)throw new Error("Reading the editor layout isn't allowed during an update");this.updateState==0&&this.measureScheduled>-1&&this.measure(!1)}requestMeasure(e){if(this.measureScheduled<0&&(this.measureScheduled=this.win.requestAnimationFrame(()=>this.measure())),e){if(e.key!=null){for(let t=0;ti.spec==e)||null),t&&t.update(this).value}get documentTop(){return this.contentDOM.getBoundingClientRect().top+this.viewState.paddingTop}get documentPadding(){return{top:this.viewState.paddingTop,bottom:this.viewState.paddingBottom}}elementAtHeight(e){return this.readMeasured(),this.viewState.elementAtHeight(e)}lineBlockAtHeight(e){return this.readMeasured(),this.viewState.lineBlockAtHeight(e)}get viewportLineBlocks(){return this.viewState.viewportLines}lineBlockAt(e){return this.viewState.lineBlockAt(e)}get contentHeight(){return this.viewState.contentHeight}moveByChar(e,t,i){return hs(this,e,Io(this,e,t,i))}moveByGroup(e,t){return hs(this,e,Io(this,e,t,i=>nu(this,e.head,i)))}moveToLineBoundary(e,t,i=!0){return iu(this,e,t,i)}moveVertically(e,t,i){return hs(this,e,su(this,e,t,i))}domAtPos(e){return this.docView.domAtPos(e)}posAtDOM(e,t=0){return this.docView.posFromDOM(e,t)}posAtCoords(e,t=!0){return this.readMeasured(),ch(this,e,t)}coordsAtPos(e,t=1){this.readMeasured();let i=this.docView.coordsAt(e,t);if(!i||i.left==i.right)return i;let s=this.state.doc.lineAt(e),r=this.bidiSpans(s),o=r[$t.find(r,e-s.from,-1,t)];return Pr(i,o.dir==Q.LTR==t>0)}get defaultCharacterWidth(){return this.viewState.heightOracle.charWidth}get defaultLineHeight(){return this.viewState.heightOracle.lineHeight}get textDirection(){return this.viewState.defaultTextDirection}textDirectionAt(e){return!this.state.facet(Qa)||ethis.viewport.to?this.textDirection:(this.readMeasured(),this.docView.textDirectionAt(e))}get lineWrapping(){return this.viewState.heightOracle.lineWrapping}bidiSpans(e){if(e.length>Vu)return rh(e.length);let t=this.textDirectionAt(e.from);for(let s of this.bidiCache)if(s.from==e.from&&s.dir==t)return s.order;let i=zf(e.text,t);return this.bidiCache.push(new Pn(e.from,e.to,t,i)),i}get hasFocus(){var e;return(this.dom.ownerDocument.hasFocus()||M.safari&&((e=this.inputState)===null||e===void 0?void 0:e.lastContextMenu)>Date.now()-3e4)&&this.root.activeElement==this.contentDOM}focus(){this.observer.ignore(()=>{La(this.contentDOM),this.docView.updateSelection()})}setRoot(e){this._root!=e&&(this._root=e,this.observer.setWindow((e.nodeType==9?e:e.ownerDocument).defaultView||window),this.mountStyles())}destroy(){for(let e of 
this.plugins)e.destroy(this);this.plugins=[],this.inputState.destroy(),this.dom.remove(),this.observer.destroy(),this.measureScheduled>-1&&cancelAnimationFrame(this.measureScheduled),this.destroyed=!0}static scrollIntoView(e,t={}){return Mo.of(new On(typeof e=="number"?k.cursor(e):e,t.y,t.x,t.yMargin,t.xMargin))}static domEventHandlers(e){return ye.define(()=>({}),{eventHandlers:e})}static theme(e,t){let i=mt.newName(),s=[nn.of(i),vi.of(hr(`.${i}`,e))];return t&&t.dark&&s.push(lr.of(!0)),s}static baseTheme(e){return Wi.lowest(vi.of(hr("."+ar,e,vh)))}static findFromDOM(e){var t;let i=e.querySelector(".cm-content"),s=i&&K.get(i)||K.get(e);return((t=s?.rootView)===null||t===void 0?void 0:t.view)||null}}B.styleModule=vi;B.inputHandler=Za;B.perLineTextDirection=Qa;B.exceptionSink=Xa;B.updateListener=tr;B.editable=Un;B.mouseSelectionStyle=Ya;B.dragMovesSelection=Ja;B.clickAddsSelectionRange=Ga;B.decorations=Ii;B.atomicRanges=ih;B.scrollMargins=nh;B.darkTheme=lr;B.contentAttributes=th;B.editorAttributes=eh;B.lineWrapping=B.contentAttributes.of({class:"cm-lineWrapping"});B.announce=R.define();const Vu=4096,Yo={};class Pn{constructor(e,t,i,s){this.from=e,this.to=t,this.dir=i,this.order=s}static update(e,t){if(t.empty)return e;let i=[],s=e.length?e[e.length-1].dir:Q.LTR;for(let r=Math.max(0,e.length-10);r=0;s--){let r=i[s],o=typeof r=="function"?r(n):r;o&&Qs(o,t)}return t}const Fu=M.mac?"mac":M.windows?"win":M.linux?"linux":"key";function Hu(n,e){const t=n.split(/-(?!$)/);let i=t[t.length-1];i=="Space"&&(i=" ");let s,r,o,l;for(let a=0;ai.concat(s),[]))),t}let at=null;const qu=4e3;function ju(n,e=Fu){let t=Object.create(null),i=Object.create(null),s=(o,l)=>{let a=i[o];if(a==null)i[o]=l;else if(a!=l)throw new Error("Key binding "+o+" is used both as a regular binding and as a multi-stroke prefix")},r=(o,l,a,h)=>{var c,f;let u=t[o]||(t[o]=Object.create(null)),d=l.split(/ (?!$)/).map(b=>Hu(b,e));for(let b=1;b{let S=at={view:y,prefix:w,scope:o};return setTimeout(()=>{at==S&&(at=null)},qu),!0}]})}let p=d.join(" ");s(p,!1);let g=u[p]||(u[p]={preventDefault:!1,run:((f=(c=u._any)===null||c===void 0?void 0:c.run)===null||f===void 0?void 0:f.slice())||[]});a&&g.run.push(a),h&&(g.preventDefault=!0)};for(let o of n){let l=o.scope?o.scope.split(" "):["editor"];if(o.any)for(let h of l){let c=t[h]||(t[h]=Object.create(null));c._any||(c._any={preventDefault:!1,run:[]});for(let f in c)c[f].run.push(o.any)}let a=o[e]||o.key;if(a)for(let h of l)r(h,a,o.run,o.preventDefault),o.shift&&r(h,"Shift-"+a,o.shift,o.preventDefault)}return t}function Ku(n,e,t,i){let s=Af(e),r=ge(s,0),o=Ee(r)==s.length&&s!=" ",l="",a=!1;at&&at.view==t&&at.scope==i&&(l=at.prefix+" ",(a=uh.indexOf(e.keyCode)<0)&&(at=null));let h=new Set,c=p=>{if(p){for(let g of p.run)if(!h.has(g)&&(h.add(g),g(t,e)))return!0;p.preventDefault&&(a=!0)}return!1},f=n[i],u,d;if(f){if(c(f[l+sn(s,e,!o)]))return!0;if(o&&(e.shiftKey||e.altKey||e.metaKey||r>127)&&(u=gt[e.keyCode])&&u!=s){if(c(f[l+sn(u,e,!0)]))return!0;if(e.shiftKey&&(d=Ei[e.keyCode])!=s&&d!=u&&c(f[l+sn(d,e,!1)]))return!0}else if(o&&e.shiftKey&&c(f[l+sn(s,e,!0)]))return!0;if(c(f._any))return!0}return a}const Sh=!M.ios,Si=O.define({combine(n){return Wt(n,{cursorBlinkRate:1200,drawRangeCursor:!0},{cursorBlinkRate:(e,t)=>Math.min(e,t),drawRangeCursor:(e,t)=>e||t})}});function Uu(n={}){return[Si.of(n),Gu,Ju,$a.of(!0)]}class Ch{constructor(e,t,i,s,r){this.left=e,this.top=t,this.width=i,this.height=s,this.className=r}draw(){let e=document.createElement("div");return 
e.className=this.className,this.adjust(e),e}adjust(e){e.style.left=this.left+"px",e.style.top=this.top+"px",this.width>=0&&(e.style.width=this.width+"px"),e.style.height=this.height+"px"}eq(e){return this.left==e.left&&this.top==e.top&&this.width==e.width&&this.height==e.height&&this.className==e.className}}const Gu=ye.fromClass(class{constructor(n){this.view=n,this.rangePieces=[],this.cursors=[],this.measureReq={read:this.readPos.bind(this),write:this.drawSel.bind(this)},this.selectionLayer=n.scrollDOM.appendChild(document.createElement("div")),this.selectionLayer.className="cm-selectionLayer",this.selectionLayer.setAttribute("aria-hidden","true"),this.cursorLayer=n.scrollDOM.appendChild(document.createElement("div")),this.cursorLayer.className="cm-cursorLayer",this.cursorLayer.setAttribute("aria-hidden","true"),n.requestMeasure(this.measureReq),this.setBlinkRate()}setBlinkRate(){this.cursorLayer.style.animationDuration=this.view.state.facet(Si).cursorBlinkRate+"ms"}update(n){let e=n.startState.facet(Si)!=n.state.facet(Si);(e||n.selectionSet||n.geometryChanged||n.viewportChanged)&&this.view.requestMeasure(this.measureReq),n.transactions.some(t=>t.scrollIntoView)&&(this.cursorLayer.style.animationName=this.cursorLayer.style.animationName=="cm-blink"?"cm-blink2":"cm-blink"),e&&this.setBlinkRate()}readPos(){let{state:n}=this.view,e=n.facet(Si),t=n.selection.ranges.map(s=>s.empty?[]:Yu(this.view,s)).reduce((s,r)=>s.concat(r)),i=[];for(let s of n.selection.ranges){let r=s==n.selection.main;if(s.empty?!r||Sh:e.drawRangeCursor){let o=Xu(this.view,s,r);o&&i.push(o)}}return{rangePieces:t,cursors:i}}drawSel({rangePieces:n,cursors:e}){if(n.length!=this.rangePieces.length||n.some((t,i)=>!t.eq(this.rangePieces[i]))){this.selectionLayer.textContent="";for(let t of n)this.selectionLayer.appendChild(t.draw());this.rangePieces=n}if(e.length!=this.cursors.length||e.some((t,i)=>!t.eq(this.cursors[i]))){let t=this.cursorLayer.children;if(t.length!==e.length){this.cursorLayer.textContent="";for(const i of e)this.cursorLayer.appendChild(i.draw())}else e.forEach((i,s)=>i.adjust(t[s]));this.cursors=e}}destroy(){this.selectionLayer.remove(),this.cursorLayer.remove()}}),Ah={".cm-line":{"& ::selection":{backgroundColor:"transparent !important"},"&::selection":{backgroundColor:"transparent !important"}}};Sh&&(Ah[".cm-line"].caretColor="transparent !important");const Ju=Wi.highest(B.theme(Ah));function Mh(n){let e=n.scrollDOM.getBoundingClientRect();return{left:(n.textDirection==Q.LTR?e.left:e.right-n.scrollDOM.clientWidth)-n.scrollDOM.scrollLeft,top:e.top-n.scrollDOM.scrollTop}}function Qo(n,e,t){let i=k.cursor(e);return{from:Math.max(t.from,n.moveToLineBoundary(i,!1,!0).from),to:Math.min(t.to,n.moveToLineBoundary(i,!0,!0).from),type:z.Text}}function $o(n,e){let t=n.lineBlockAt(e);if(Array.isArray(t.type)){for(let i of t.type)if(i.to>e||i.to==e&&(i.to==t.to||i.type==z.Text))return i}return t}function Yu(n,e){if(e.to<=n.viewport.from||e.from>=n.viewport.to)return[];let t=Math.max(e.from,n.viewport.from),i=Math.min(e.to,n.viewport.to),s=n.textDirection==Q.LTR,r=n.contentDOM,o=r.getBoundingClientRect(),l=Mh(n),a=window.getComputedStyle(r.firstChild),h=o.left+parseInt(a.paddingLeft)+Math.min(0,parseInt(a.textIndent)),c=o.right-parseInt(a.paddingRight),f=$o(n,t),u=$o(n,i),d=f.type==z.Text?f:null,p=u.type==z.Text?u:null;if(n.lineWrapping&&(d&&(d=Qo(n,t,d)),p&&(p=Qo(n,i,p))),d&&p&&d.from==p.from)return b(w(e.from,e.to,d));{let 
S=d?w(e.from,null,d):y(f,!1),C=p?w(null,e.to,p):y(u,!0),A=[];return(d||f).to<(p||u).from-1?A.push(g(h,S.bottom,c,C.top)):S.bottomP&&G.from=T)break;Y>$&&I(Math.max(ce,$),S==null&&ce<=P,Math.min(Y,T),C==null&&Y>=V,X.dir)}if($=J.to+1,$>=T)break}return U.length==0&&I(P,S==null,V,C==null,n.textDirection),{top:D,bottom:v,horizontal:U}}function y(S,C){let A=o.top+(C?S.top:S.bottom);return{top:A,bottom:A,horizontal:[]}}}function Xu(n,e,t){let i=n.coordsAtPos(e.head,e.assoc||1);if(!i)return null;let s=Mh(n);return new Ch(i.left-s.left,i.top-s.top,-1,i.bottom-i.top,t?"cm-cursor cm-cursor-primary":"cm-cursor cm-cursor-secondary")}function el(n,e,t,i,s){e.lastIndex=0;for(let r=n.iterRange(t,i),o=t,l;!r.next().done;o+=r.value.length)if(!r.lineBreak)for(;l=e.exec(r.value);)s(o+l.index,l)}function Zu(n,e){let t=n.visibleRanges;if(t.length==1&&t[0].from==n.viewport.from&&t[0].to==n.viewport.to)return t;let i=[];for(let{from:s,to:r}of t)s=Math.max(n.state.doc.lineAt(s).from,s-e),r=Math.min(n.state.doc.lineAt(r).to,r+e),i.length&&i[i.length-1].to>=s?i[i.length-1].to=r:i.push({from:s,to:r});return i}class Qu{constructor(e){const{regexp:t,decoration:i,decorate:s,boundary:r,maxLength:o=1e3}=e;if(!t.global)throw new RangeError("The regular expression given to MatchDecorator should have its 'g' flag set");if(this.regexp=t,s)this.addMatch=(l,a,h,c)=>s(c,h,h+l[0].length,l,a);else if(typeof i=="function")this.addMatch=(l,a,h,c)=>{let f=i(l,a,h);f&&c(h,h+l[0].length,f)};else if(i)this.addMatch=(l,a,h,c)=>c(h,h+l[0].length,i);else throw new RangeError("Either 'decorate' or 'decoration' should be provided to MatchDecorator");this.boundary=r,this.maxLength=o}createDeco(e){let t=new It,i=t.add.bind(t);for(let{from:s,to:r}of Zu(e,this.maxLength))el(e.state.doc,this.regexp,s,r,(o,l)=>this.addMatch(l,e,o,i));return t.finish()}updateDeco(e,t){let i=1e9,s=-1;return e.docChanged&&e.changes.iterChanges((r,o,l,a)=>{a>e.view.viewport.from&&l1e3?this.createDeco(e.view):s>-1?this.updateRange(e.view,t.map(e.changes),i,s):t}updateRange(e,t,i,s){for(let r of e.visibleRanges){let o=Math.max(r.from,i),l=Math.min(r.to,s);if(l>o){let a=e.state.doc.lineAt(o),h=a.toa.from;o--)if(this.boundary.test(a.text[o-1-a.from])){c=o;break}for(;lu.push(w.range(g,b));if(a==h)for(this.regexp.lastIndex=c-a.from;(d=this.regexp.exec(a.text))&&d.indexthis.addMatch(b,e,g,p));t=t.update({filterFrom:c,filterTo:f,filter:(g,b)=>gf,add:u})}}return t}}const cr=/x/.unicode!=null?"gu":"g",$u=new RegExp(`[\0-\b ---Ÿ­؜​‎‏\u2028\u2029‭‮⁦⁧⁩\uFEFF-]`,cr),ed={0:"null",7:"bell",8:"backspace",10:"newline",11:"vertical tab",13:"carriage return",27:"escape",8203:"zero width space",8204:"zero width non-joiner",8205:"zero width joiner",8206:"left-to-right mark",8207:"right-to-left mark",8232:"line separator",8237:"left-to-right override",8238:"right-to-left override",8294:"left-to-right isolate",8295:"right-to-left isolate",8297:"pop directional isolate",8233:"paragraph separator",65279:"zero width no-break space",65532:"object replacement"};let us=null;function td(){var n;if(us==null&&typeof document<"u"&&document.body){let e=document.body.style;us=((n=e.tabSize)!==null&&n!==void 0?n:e.MozTabSize)!=null}return us||!1}const wn=O.define({combine(n){let e=Wt(n,{render:null,specialChars:$u,addSpecialChars:null});return(e.replaceTabs=!td())&&(e.specialChars=new RegExp(" |"+e.specialChars.source,cr)),e.addSpecialChars&&(e.specialChars=new RegExp(e.specialChars.source+"|"+e.addSpecialChars.source,cr)),e}});function id(n={}){return[wn.of(n),nd()]}let tl=null;function nd(){return 
tl||(tl=ye.fromClass(class{constructor(n){this.view=n,this.decorations=E.none,this.decorationCache=Object.create(null),this.decorator=this.makeDecorator(n.state.facet(wn)),this.decorations=this.decorator.createDeco(n)}makeDecorator(n){return new Qu({regexp:n.specialChars,decoration:(e,t,i)=>{let{doc:s}=t.state,r=ge(e[0],0);if(r==9){let o=s.lineAt(i),l=t.state.tabSize,a=zi(o.text,l,i-o.from);return E.replace({widget:new ld((l-a%l)*this.view.defaultCharacterWidth)})}return this.decorationCache[r]||(this.decorationCache[r]=E.replace({widget:new od(n,r)}))},boundary:n.replaceTabs?void 0:/[^]/})}update(n){let e=n.state.facet(wn);n.startState.facet(wn)!=e?(this.decorator=this.makeDecorator(e),this.decorations=this.decorator.createDeco(n.view)):this.decorations=this.decorator.updateDeco(n,this.decorations)}},{decorations:n=>n.decorations}))}const sd="•";function rd(n){return n>=32?sd:n==10?"␤":String.fromCharCode(9216+n)}class od extends tt{constructor(e,t){super(),this.options=e,this.code=t}eq(e){return e.code==this.code}toDOM(e){let t=rd(this.code),i=e.state.phrase("Control character")+" "+(ed[this.code]||"0x"+this.code.toString(16)),s=this.options.render&&this.options.render(this.code,i,t);if(s)return s;let r=document.createElement("span");return r.textContent=t,r.title=i,r.setAttribute("aria-label",i),r.className="cm-specialChar",r}ignoreEvent(){return!1}}class ld extends tt{constructor(e){super(),this.width=e}eq(e){return e.width==this.width}toDOM(){let e=document.createElement("span");return e.textContent=" ",e.className="cm-tab",e.style.width=this.width+"px",e}ignoreEvent(){return!1}}class ad extends tt{constructor(e){super(),this.content=e}toDOM(){let e=document.createElement("span");return e.className="cm-placeholder",e.style.pointerEvents="none",e.appendChild(typeof this.content=="string"?document.createTextNode(this.content):this.content),typeof this.content=="string"?e.setAttribute("aria-label","placeholder "+this.content):e.setAttribute("aria-hidden","true"),e}ignoreEvent(){return!1}}function hd(n){return ye.fromClass(class{constructor(e){this.view=e,this.placeholder=E.set([E.widget({widget:new ad(n),side:1}).range(0)])}get decorations(){return this.view.state.doc.length?E.none:this.placeholder}},{decorations:e=>e.decorations})}const fr=2e3;function cd(n,e,t){let i=Math.min(e.line,t.line),s=Math.max(e.line,t.line),r=[];if(e.off>fr||t.off>fr||e.col<0||t.col<0){let o=Math.min(e.off,t.off),l=Math.max(e.off,t.off);for(let a=i;a<=s;a++){let h=n.doc.line(a);h.length<=l&&r.push(k.range(h.from+o,h.to+l))}}else{let o=Math.min(e.col,t.col),l=Math.max(e.col,t.col);for(let a=i;a<=s;a++){let h=n.doc.line(a),c=js(h.text,o,n.tabSize,!0);if(c<0)r.push(k.cursor(h.to));else{let f=js(h.text,l,n.tabSize);r.push(k.range(h.from+c,h.from+f))}}}return r}function fd(n,e){let t=n.coordsAtPos(n.viewport.from);return t?Math.round(Math.abs((t.left-e)/n.defaultCharacterWidth)):-1}function il(n,e){let t=n.posAtCoords({x:e.clientX,y:e.clientY},!1),i=n.state.doc.lineAt(t),s=t-i.from,r=s>fr?-1:s==i.length?fd(n,e.clientX):zi(i.text,n.state.tabSize,t-i.from);return{line:i.number,col:r,off:s}}function ud(n,e){let t=il(n,e),i=n.state.selection;return t?{update(s){if(s.docChanged){let r=s.changes.mapPos(s.startState.doc.line(t.line).from),o=s.state.doc.lineAt(r);t={line:o.number,col:t.col,off:Math.min(t.off,o.length)},i=i.map(s.changes)}},get(s,r,o){let l=il(n,s);if(!l)return i;let a=cd(n.state,t,l);return a.length?o?k.create(a.concat(i.ranges)):k.create(a):i}}:null}function dd(n){let 
e=n?.eventFilter||(t=>t.altKey&&t.button==0);return B.mouseSelectionStyle.of((t,i)=>e(i)?ud(t,i):null)}const pd={Alt:[18,n=>n.altKey],Control:[17,n=>n.ctrlKey],Shift:[16,n=>n.shiftKey],Meta:[91,n=>n.metaKey]},md={style:"cursor: crosshair"};function gd(n={}){let[e,t]=pd[n.key||"Alt"],i=ye.fromClass(class{constructor(s){this.view=s,this.isDown=!1}set(s){this.isDown!=s&&(this.isDown=s,this.view.update([]))}},{eventHandlers:{keydown(s){this.set(s.keyCode==e||t(s))},keyup(s){(s.keyCode==e||!t(s))&&this.set(!1)},mousemove(s){this.set(t(s))}}});return[i,B.contentAttributes.of(s=>{var r;return!((r=s.plugin(i))===null||r===void 0)&&r.isDown?md:null})]}const ds="-10000px";class Dh{constructor(e,t,i){this.facet=t,this.createTooltipView=i,this.input=e.state.facet(t),this.tooltips=this.input.filter(s=>s),this.tooltipViews=this.tooltips.map(i)}update(e){var t;let i=e.state.facet(this.facet),s=i.filter(o=>o);if(i===this.input){for(let o of this.tooltipViews)o.update&&o.update(e);return!1}let r=[];for(let o=0;o{var e,t,i;return{position:M.ios?"absolute":((e=n.find(s=>s.position))===null||e===void 0?void 0:e.position)||"fixed",parent:((t=n.find(s=>s.parent))===null||t===void 0?void 0:t.parent)||null,tooltipSpace:((i=n.find(s=>s.tooltipSpace))===null||i===void 0?void 0:i.tooltipSpace)||bd}}}),Th=ye.fromClass(class{constructor(n){this.view=n,this.inView=!0,this.lastTransaction=0,this.measureTimeout=-1;let e=n.state.facet(ps);this.position=e.position,this.parent=e.parent,this.classes=n.themeClasses,this.createContainer(),this.measureReq={read:this.readMeasure.bind(this),write:this.writeMeasure.bind(this),key:this},this.manager=new Dh(n,_r,t=>this.createTooltip(t)),this.intersectionObserver=typeof IntersectionObserver=="function"?new IntersectionObserver(t=>{Date.now()>this.lastTransaction-50&&t.length>0&&t[t.length-1].intersectionRatio<1&&this.measureSoon()},{threshold:[1]}):null,this.observeIntersection(),n.win.addEventListener("resize",this.measureSoon=this.measureSoon.bind(this)),this.maybeMeasure()}createContainer(){this.parent?(this.container=document.createElement("div"),this.container.style.position="relative",this.container.className=this.view.themeClasses,this.parent.appendChild(this.container)):this.container=this.view.dom}observeIntersection(){if(this.intersectionObserver){this.intersectionObserver.disconnect();for(let n of this.manager.tooltipViews)this.intersectionObserver.observe(n.dom)}}measureSoon(){this.measureTimeout<0&&(this.measureTimeout=setTimeout(()=>{this.measureTimeout=-1,this.maybeMeasure()},50))}update(n){n.transactions.length&&(this.lastTransaction=Date.now());let e=this.manager.update(n);e&&this.observeIntersection();let t=e||n.geometryChanged,i=n.state.facet(ps);if(i.position!=this.position){this.position=i.position;for(let s of this.manager.tooltipViews)s.dom.style.position=this.position;t=!0}if(i.parent!=this.parent){this.parent&&this.container.remove(),this.parent=i.parent,this.createContainer();for(let s of this.manager.tooltipViews)this.container.appendChild(s.dom);t=!0}else this.parent&&this.view.themeClasses!=this.classes&&(this.classes=this.container.className=this.view.themeClasses);t&&this.maybeMeasure()}createTooltip(n){let e=n.create(this.view);if(e.dom.classList.add("cm-tooltip"),n.arrow&&!e.dom.querySelector(".cm-tooltip > .cm-tooltip-arrow")){let t=document.createElement("div");t.className="cm-tooltip-arrow",e.dom.appendChild(t)}return e.dom.style.position=this.position,e.dom.style.top=ds,this.container.appendChild(e.dom),e.mount&&e.mount(this.view),e}destroy(){var 
n,e;this.view.win.removeEventListener("resize",this.measureSoon);for(let t of this.manager.tooltipViews)t.dom.remove(),(n=t.destroy)===null||n===void 0||n.call(t);(e=this.intersectionObserver)===null||e===void 0||e.disconnect(),clearTimeout(this.measureTimeout)}readMeasure(){let n=this.view.dom.getBoundingClientRect();return{editor:n,parent:this.parent?this.container.getBoundingClientRect():n,pos:this.manager.tooltips.map((e,t)=>{let i=this.manager.tooltipViews[t];return i.getCoords?i.getCoords(e.pos):this.view.coordsAtPos(e.pos)}),size:this.manager.tooltipViews.map(({dom:e})=>e.getBoundingClientRect()),space:this.view.state.facet(ps).tooltipSpace(this.view)}}writeMeasure(n){let{editor:e,space:t}=n,i=[];for(let s=0;s=Math.min(e.bottom,t.bottom)||a.rightMath.min(e.right,t.right)+.1){l.style.top=ds;continue}let c=r.arrow?o.dom.querySelector(".cm-tooltip-arrow"):null,f=c?7:0,u=h.right-h.left,d=h.bottom-h.top,p=o.offset||wd,g=this.view.textDirection==Q.LTR,b=h.width>t.right-t.left?g?t.left:t.right-h.width:g?Math.min(a.left-(c?14:0)+p.x,t.right-u):Math.max(t.left,a.left-u+(c?14:0)-p.x),w=!!r.above;!r.strictSide&&(w?a.top-(h.bottom-h.top)-p.yt.bottom)&&w==t.bottom-a.bottom>a.top-t.top&&(w=!w);let y=w?a.top-d-f-p.y:a.bottom+f+p.y,S=b+u;if(o.overlap!==!0)for(let C of i)C.leftb&&C.topy&&(y=w?C.top-d-2-f:C.bottom+f+2);this.position=="absolute"?(l.style.top=y-n.parent.top+"px",l.style.left=b-n.parent.left+"px"):(l.style.top=y+"px",l.style.left=b+"px"),c&&(c.style.left=`${a.left+(g?p.x:-p.x)-(b+14-7)}px`),o.overlap!==!0&&i.push({left:b,top:y,right:S,bottom:y+d}),l.classList.toggle("cm-tooltip-above",w),l.classList.toggle("cm-tooltip-below",!w),o.positioned&&o.positioned()}}maybeMeasure(){if(this.manager.tooltips.length&&(this.view.inView&&this.view.requestMeasure(this.measureReq),this.inView!=this.view.inView&&(this.inView=this.view.inView,!this.inView)))for(let n of this.manager.tooltipViews)n.dom.style.top=ds}},{eventHandlers:{scroll(){this.maybeMeasure()}}}),yd=B.baseTheme({".cm-tooltip":{zIndex:100},"&light .cm-tooltip":{border:"1px solid #bbb",backgroundColor:"#f5f5f5"},"&light .cm-tooltip-section:not(:first-child)":{borderTop:"1px solid #bbb"},"&dark .cm-tooltip":{backgroundColor:"#333338",color:"white"},".cm-tooltip-arrow":{height:"7px",width:`${7*2}px`,position:"absolute",zIndex:-1,overflow:"hidden","&:before, &:after":{content:"''",position:"absolute",width:0,height:0,borderLeft:"7px solid transparent",borderRight:"7px solid transparent"},".cm-tooltip-above &":{bottom:"-7px","&:before":{borderTop:"7px solid #bbb"},"&:after":{borderTop:"7px solid #f5f5f5",bottom:"1px"}},".cm-tooltip-below &":{top:"-7px","&:before":{borderBottom:"7px solid #bbb"},"&:after":{borderBottom:"7px solid #f5f5f5",top:"1px"}}},"&dark .cm-tooltip .cm-tooltip-arrow":{"&:before":{borderTopColor:"#333338",borderBottomColor:"#333338"},"&:after":{borderTopColor:"transparent",borderBottomColor:"transparent"}}}),wd={x:0,y:0},_r=O.define({enables:[Th,yd]}),En=O.define();class Nr{constructor(e){this.view=e,this.mounted=!1,this.dom=document.createElement("div"),this.dom.classList.add("cm-tooltip-hover"),this.manager=new Dh(e,En,t=>this.createHostedView(t))}static create(e){return new Nr(e)}createHostedView(e){let t=e.create(this.view);return t.dom.classList.add("cm-tooltip-section"),this.dom.appendChild(t.dom),this.mounted&&t.mount&&t.mount(this.view),t}mount(e){for(let t of this.manager.tooltipViews)t.mount&&t.mount(e);this.mounted=!0}positioned(){for(let e of 
this.manager.tooltipViews)e.positioned&&e.positioned()}update(e){this.manager.update(e)}}const kd=_r.compute([En],n=>{let e=n.facet(En).filter(t=>t);return e.length===0?null:{pos:Math.min(...e.map(t=>t.pos)),end:Math.max(...e.filter(t=>t.end!=null).map(t=>t.end)),create:Nr.create,above:e[0].above,arrow:e.some(t=>t.arrow)}});class vd{constructor(e,t,i,s,r){this.view=e,this.source=t,this.field=i,this.setHover=s,this.hoverTime=r,this.hoverTimeout=-1,this.restartTimeout=-1,this.pending=null,this.lastMove={x:0,y:0,target:e.dom,time:0},this.checkHover=this.checkHover.bind(this),e.dom.addEventListener("mouseleave",this.mouseleave=this.mouseleave.bind(this)),e.dom.addEventListener("mousemove",this.mousemove=this.mousemove.bind(this))}update(){this.pending&&(this.pending=null,clearTimeout(this.restartTimeout),this.restartTimeout=setTimeout(()=>this.startHover(),20))}get active(){return this.view.state.field(this.field)}checkHover(){if(this.hoverTimeout=-1,this.active)return;let e=Date.now()-this.lastMove.time;ei.bottom||e.xi.right+this.view.defaultCharacterWidth)return;let s=this.view.bidiSpans(this.view.state.doc.lineAt(t)).find(l=>l.from<=t&&l.to>=t),r=s&&s.dir==Q.RTL?-1:1,o=this.source(this.view,t,e.x{this.pending==l&&(this.pending=null,a&&this.view.dispatch({effects:this.setHover.of(a)}))},a=>He(this.view.state,a,"hover tooltip"))}else o&&this.view.dispatch({effects:this.setHover.of(o)})}mousemove(e){var t;this.lastMove={x:e.clientX,y:e.clientY,target:e.target,time:Date.now()},this.hoverTimeout<0&&(this.hoverTimeout=setTimeout(this.checkHover,this.hoverTime));let i=this.active;if(i&&!xd(this.lastMove.target)||this.pending){let{pos:s}=i||this.pending,r=(t=i?.end)!==null&&t!==void 0?t:s;(s==r?this.view.posAtCoords(this.lastMove)!=s:!Sd(this.view,s,r,e.clientX,e.clientY,6))&&(this.view.dispatch({effects:this.setHover.of(null)}),this.pending=null)}}mouseleave(){clearTimeout(this.hoverTimeout),this.hoverTimeout=-1,this.active&&this.view.dispatch({effects:this.setHover.of(null)})}destroy(){clearTimeout(this.hoverTimeout),this.view.dom.removeEventListener("mouseleave",this.mouseleave),this.view.dom.removeEventListener("mousemove",this.mousemove)}}function xd(n){for(let e=n;e;e=e.parentNode)if(e.nodeType==1&&e.classList.contains("cm-tooltip"))return!0;return!1}function Sd(n,e,t,i,s,r){let o=document.createRange(),l=n.domAtPos(e),a=n.domAtPos(t);o.setEnd(a.node,a.offset),o.setStart(l.node,l.offset);let h=o.getClientRects();o.detach();for(let c=0;cEn.from(s)});return[i,ye.define(s=>new vd(s,n,i,t,e.hoverTime||300)),kd]}function Ad(n,e){let t=n.plugin(Th);if(!t)return null;let i=t.manager.tooltips.indexOf(e);return i<0?null:t.manager.tooltipViews[i]}const Md=R.define(),nl=O.define({combine(n){let e,t;for(let i of n)e=e||i.topContainer,t=t||i.bottomContainer;return{topContainer:e,bottomContainer:t}}});function Dd(n,e){let t=n.plugin(Oh),i=t?t.specs.indexOf(e):-1;return i>-1?t.panels[i]:null}const Oh=ye.fromClass(class{constructor(n){this.input=n.state.facet(ur),this.specs=this.input.filter(t=>t),this.panels=this.specs.map(t=>t(n));let e=n.state.facet(nl);this.top=new rn(n,!0,e.topContainer),this.bottom=new rn(n,!1,e.bottomContainer),this.top.sync(this.panels.filter(t=>t.top)),this.bottom.sync(this.panels.filter(t=>!t.top));for(let t of this.panels)t.dom.classList.add("cm-panel"),t.mount&&t.mount()}update(n){let e=n.state.facet(nl);this.top.container!=e.topContainer&&(this.top.sync([]),this.top=new rn(n.view,!0,e.topContainer)),this.bottom.container!=e.bottomContainer&&(this.bottom.sync([]),this.bottom=new 
rn(n.view,!1,e.bottomContainer)),this.top.syncClasses(),this.bottom.syncClasses();let t=n.state.facet(ur);if(t!=this.input){let i=t.filter(a=>a),s=[],r=[],o=[],l=[];for(let a of i){let h=this.specs.indexOf(a),c;h<0?(c=a(n.view),l.push(c)):(c=this.panels[h],c.update&&c.update(n)),s.push(c),(c.top?r:o).push(c)}this.specs=i,this.panels=s,this.top.sync(r),this.bottom.sync(o);for(let a of l)a.dom.classList.add("cm-panel"),a.mount&&a.mount()}else for(let i of this.panels)i.update&&i.update(n)}destroy(){this.top.sync([]),this.bottom.sync([])}},{provide:n=>B.scrollMargins.of(e=>{let t=e.plugin(n);return t&&{top:t.top.scrollMargin(),bottom:t.bottom.scrollMargin()}})});class rn{constructor(e,t,i){this.view=e,this.top=t,this.container=i,this.dom=void 0,this.classes="",this.panels=[],this.syncClasses()}sync(e){for(let t of this.panels)t.destroy&&e.indexOf(t)<0&&t.destroy();this.panels=e,this.syncDOM()}syncDOM(){if(this.panels.length==0){this.dom&&(this.dom.remove(),this.dom=void 0);return}if(!this.dom){this.dom=document.createElement("div"),this.dom.className=this.top?"cm-panels cm-panels-top":"cm-panels cm-panels-bottom",this.dom.style[this.top?"top":"bottom"]="0";let t=this.container||this.view.dom;t.insertBefore(this.dom,this.top?t.firstChild:null)}let e=this.dom.firstChild;for(let t of this.panels)if(t.dom.parentNode==this.dom){for(;e!=t.dom;)e=sl(e);e=e.nextSibling}else this.dom.insertBefore(t.dom,e);for(;e;)e=sl(e)}scrollMargin(){return!this.dom||this.container?0:Math.max(0,this.top?this.dom.getBoundingClientRect().bottom-Math.max(0,this.view.scrollDOM.getBoundingClientRect().top):Math.min(innerHeight,this.view.scrollDOM.getBoundingClientRect().bottom)-this.dom.getBoundingClientRect().top)}syncClasses(){if(!(!this.container||this.classes==this.view.themeClasses)){for(let e of this.classes.split(" "))e&&this.container.classList.remove(e);for(let e of(this.classes=this.view.themeClasses).split(" "))e&&this.container.classList.add(e)}}}function sl(n){let e=n.nextSibling;return n.remove(),e}const ur=O.define({enables:Oh});class yt extends Lt{compare(e){return this==e||this.constructor==e.constructor&&this.eq(e)}eq(e){return!1}destroy(e){}}yt.prototype.elementClass="";yt.prototype.toDOM=void 0;yt.prototype.mapMode=de.TrackBefore;yt.prototype.startSide=yt.prototype.endSide=-1;yt.prototype.point=!0;const ms=O.define(),Td={class:"",renderEmptyElements:!1,elementStyle:"",markers:()=>H.empty,lineMarker:()=>null,lineMarkerChange:null,initialSpacer:null,updateSpacer:null,domEventHandlers:{}},Di=O.define();function Od(n){return[Bh(),Di.of(Object.assign(Object.assign({},Td),n))]}const dr=O.define({combine:n=>n.some(e=>e)});function Bh(n){let e=[Bd];return n&&n.fixed===!1&&e.push(dr.of(!0)),e}const Bd=ye.fromClass(class{constructor(n){this.view=n,this.prevViewport=n.viewport,this.dom=document.createElement("div"),this.dom.className="cm-gutters",this.dom.setAttribute("aria-hidden","true"),this.dom.style.minHeight=this.view.contentHeight+"px",this.gutters=n.state.facet(Di).map(e=>new ol(n,e));for(let e of this.gutters)this.dom.appendChild(e.dom);this.fixed=!n.state.facet(dr),this.fixed&&(this.dom.style.position="sticky"),this.syncGutters(!1),n.scrollDOM.insertBefore(this.dom,n.contentDOM)}update(n){if(this.updateGutters(n)){let 
e=this.prevViewport,t=n.view.viewport,i=Math.min(e.to,t.to)-Math.max(e.from,t.from);this.syncGutters(i<(t.to-t.from)*.8)}n.geometryChanged&&(this.dom.style.minHeight=this.view.contentHeight+"px"),this.view.state.facet(dr)!=!this.fixed&&(this.fixed=!this.fixed,this.dom.style.position=this.fixed?"sticky":""),this.prevViewport=n.view.viewport}syncGutters(n){let e=this.dom.nextSibling;n&&this.dom.remove();let t=H.iter(this.view.state.facet(ms),this.view.viewport.from),i=[],s=this.gutters.map(r=>new Pd(r,this.view.viewport,-this.view.documentPadding.top));for(let r of this.view.viewportLineBlocks){let o;if(Array.isArray(r.type)){for(let l of r.type)if(l.type==z.Text){o=l;break}}else o=r.type==z.Text?r:void 0;if(o){i.length&&(i=[]),Ph(t,i,r.from);for(let l of s)l.line(this.view,o,i)}}for(let r of s)r.finish();n&&this.view.scrollDOM.insertBefore(this.dom,e)}updateGutters(n){let e=n.startState.facet(Di),t=n.state.facet(Di),i=n.docChanged||n.heightChanged||n.viewportChanged||!H.eq(n.startState.facet(ms),n.state.facet(ms),n.view.viewport.from,n.view.viewport.to);if(e==t)for(let s of this.gutters)s.update(n)&&(i=!0);else{i=!0;let s=[];for(let r of t){let o=e.indexOf(r);o<0?s.push(new ol(this.view,r)):(this.gutters[o].update(n),s.push(this.gutters[o]))}for(let r of this.gutters)r.dom.remove(),s.indexOf(r)<0&&r.destroy();for(let r of s)this.dom.appendChild(r.dom);this.gutters=s}return i}destroy(){for(let n of this.gutters)n.destroy();this.dom.remove()}},{provide:n=>B.scrollMargins.of(e=>{let t=e.plugin(n);return!t||t.gutters.length==0||!t.fixed?null:e.textDirection==Q.LTR?{left:t.dom.offsetWidth}:{right:t.dom.offsetWidth}})});function rl(n){return Array.isArray(n)?n:[n]}function Ph(n,e,t){for(;n.value&&n.from<=t;)n.from==t&&e.push(n.value),n.next()}class Pd{constructor(e,t,i){this.gutter=e,this.height=i,this.localMarkers=[],this.i=0,this.cursor=H.iter(e.markers,t.from)}line(e,t,i){this.localMarkers.length&&(this.localMarkers=[]),Ph(this.cursor,this.localMarkers,t.from);let s=i.length?this.localMarkers.concat(i):this.localMarkers,r=this.gutter.config.lineMarker(e,t,s);r&&s.unshift(r);let o=this.gutter;if(s.length==0&&!o.config.renderEmptyElements)return;let l=t.top-this.height;if(this.i==o.elements.length){let a=new Eh(e,t.height,l,s);o.elements.push(a),o.dom.appendChild(a.dom)}else o.elements[this.i].update(e,t.height,l,s);this.height=t.bottom,this.i++}finish(){let e=this.gutter;for(;e.elements.length>this.i;){let t=e.elements.pop();e.dom.removeChild(t.dom),t.destroy()}}}class ol{constructor(e,t){this.view=e,this.config=t,this.elements=[],this.spacer=null,this.dom=document.createElement("div"),this.dom.className="cm-gutter"+(this.config.class?" 
"+this.config.class:"");for(let i in t.domEventHandlers)this.dom.addEventListener(i,s=>{let r=e.lineBlockAtHeight(s.clientY-e.documentTop);t.domEventHandlers[i](e,r,s)&&s.preventDefault()});this.markers=rl(t.markers(e)),t.initialSpacer&&(this.spacer=new Eh(e,0,0,[t.initialSpacer(e)]),this.dom.appendChild(this.spacer.dom),this.spacer.dom.style.cssText+="visibility: hidden; pointer-events: none")}update(e){let t=this.markers;if(this.markers=rl(this.config.markers(e.view)),this.spacer&&this.config.updateSpacer){let s=this.config.updateSpacer(this.spacer.markers[0],e);s!=this.spacer.markers[0]&&this.spacer.update(e.view,0,0,[s])}let i=e.view.viewport;return!H.eq(this.markers,t,i.from,i.to)||(this.config.lineMarkerChange?this.config.lineMarkerChange(e):!1)}destroy(){for(let e of this.elements)e.destroy()}}class Eh{constructor(e,t,i,s){this.height=-1,this.above=0,this.markers=[],this.dom=document.createElement("div"),this.dom.className="cm-gutterElement",this.update(e,t,i,s)}update(e,t,i,s){this.height!=t&&(this.dom.style.height=(this.height=t)+"px"),this.above!=i&&(this.dom.style.marginTop=(this.above=i)?i+"px":""),Ed(this.markers,s)||this.setMarkers(e,s)}setMarkers(e,t){let i="cm-gutterElement",s=this.dom.firstChild;for(let r=0,o=0;;){let l=o,a=rr(l,a,h)||o(l,a,h):o}return i}})}});class gs extends yt{constructor(e){super(),this.number=e}eq(e){return this.number==e.number}toDOM(){return document.createTextNode(this.number)}}function bs(n,e){return n.state.facet(Ut).formatNumber(e,n.state)}const Ld=Di.compute([Ut],n=>({class:"cm-lineNumbers",renderEmptyElements:!1,markers(e){return e.state.facet(Rd)},lineMarker(e,t,i){return i.some(s=>s.toDOM)?null:new gs(bs(e,e.state.doc.lineAt(t.from).number))},lineMarkerChange:e=>e.startState.facet(Ut)!=e.state.facet(Ut),initialSpacer(e){return new gs(bs(e,ll(e.state.doc.lines)))},updateSpacer(e,t){let i=bs(t.view,ll(t.view.state.doc.lines));return i==e.number?e:new gs(i)},domEventHandlers:n.facet(Ut).domEventHandlers}));function Id(n={}){return[Ut.of(n),Bh(),Ld]}function ll(n){let e=9;for(;e{throw new Error("This node type doesn't define a deserialize function")})}add(e){if(this.perNode)throw new RangeError("Can't add per-node props to node types");return typeof e!="function"&&(e=xe.match(e)),t=>{let i=e(t);return i===void 0?null:[this,i]}}}L.closedBy=new L({deserialize:n=>n.split(" ")});L.openedBy=new L({deserialize:n=>n.split(" ")});L.group=new L({deserialize:n=>n.split(" ")});L.contextHash=new L({perNode:!0});L.lookAhead=new L({perNode:!0});L.mounted=new L({perNode:!0});class Vd{constructor(e,t,i){this.tree=e,this.overlay=t,this.parser=i}}const Fd=Object.create(null);class xe{constructor(e,t,i,s=0){this.name=e,this.props=t,this.id=i,this.flags=s}static define(e){let t=e.props&&e.props.length?Object.create(null):Fd,i=(e.top?1:0)|(e.skipped?2:0)|(e.error?4:0)|(e.name==null?8:0),s=new xe(e.name||"",t,e.id,i);if(e.props){for(let r of e.props)if(Array.isArray(r)||(r=r(s)),r){if(r[0].perNode)throw new RangeError("Can't store a per-node prop on a node type");t[r[0].id]=r[1]}}return s}prop(e){return this.props[e.id]}get isTop(){return(this.flags&1)>0}get isSkipped(){return(this.flags&2)>0}get isError(){return(this.flags&4)>0}get isAnonymous(){return(this.flags&8)>0}is(e){if(typeof e=="string"){if(this.name==e)return!0;let t=this.prop(L.group);return t?t.indexOf(e)>-1:!1}return this.id==e}static match(e){let t=Object.create(null);for(let i in e)for(let s of i.split(" "))t[s]=e[i];return i=>{for(let s=i.prop(L.group),r=-1;r<(s?s.length:0);r++){let 
o=t[r<0?i.name:s[r]];if(o)return o}}}}xe.none=new xe("",Object.create(null),0,8);class Vr{constructor(e){this.types=e;for(let t=0;t=s&&(o.type.isAnonymous||t(o)!==!1)){if(o.firstChild())continue;l=!0}for(;l&&i&&!o.type.isAnonymous&&i(o),!o.nextSibling();){if(!o.parent())return;l=!0}}}prop(e){return e.perNode?this.props?this.props[e.id]:void 0:this.type.prop(e)}get propValues(){let e=[];if(this.props)for(let t in this.props)e.push([+t,this.props[t]]);return e}balance(e={}){return this.children.length<=8?this:Wr(xe.none,this.children,this.positions,0,this.children.length,0,this.length,(t,i,s)=>new q(this.type,t,i,s,this.propValues),e.makeTree||((t,i,s)=>new q(xe.none,t,i,s)))}static build(e){return Wd(e)}}q.empty=new q(xe.none,[],[],0);class Fr{constructor(e,t){this.buffer=e,this.index=t}get id(){return this.buffer[this.index-4]}get start(){return this.buffer[this.index-3]}get end(){return this.buffer[this.index-2]}get size(){return this.buffer[this.index-1]}get pos(){return this.index}next(){this.index-=4}fork(){return new Fr(this.buffer,this.index)}}class zt{constructor(e,t,i){this.buffer=e,this.length=t,this.set=i}get type(){return xe.none}toString(){let e=[];for(let t=0;t0));a=o[a+3]);return l}slice(e,t,i){let s=this.buffer,r=new Uint16Array(t-e),o=0;for(let l=e,a=0;l=e&&te;case 1:return t<=e&&i>e;case 2:return i>e;case 4:return!0}}function Lh(n,e){let t=n.childBefore(e);for(;t;){let i=t.lastChild;if(!i||i.to!=t.to)break;i.type.isError&&i.from==i.to?(n=t,t=i.prevSibling):t=i}return n}function oi(n,e,t,i){for(var s;n.from==n.to||(t<1?n.from>=e:n.from>e)||(t>-1?n.to<=e:n.to0?l.length:-1;e!=h;e+=t){let c=l[e],f=a[e]+o.from;if(Rh(s,i,f,f+c.length)){if(c instanceof zt){if(r&ee.ExcludeBuffers)continue;let u=c.findChild(0,c.buffer.length,t,i-f,s);if(u>-1)return new Xe(new Hd(o,c,e,f),null,u)}else if(r&ee.IncludeAnonymous||!c.type.isAnonymous||Hr(c)){let u;if(!(r&ee.IgnoreMounts)&&c.props&&(u=c.prop(L.mounted))&&!u.overlay)return new Ne(u.tree,f,e,o);let d=new Ne(c,f,e,o);return r&ee.IncludeAnonymous||!d.type.isAnonymous?d:d.nextChild(t<0?c.children.length-1:0,t,i,s)}}}if(r&ee.IncludeAnonymous||!o.type.isAnonymous||(o.index>=0?e=o.index+t:e=t<0?-1:o._parent._tree.children.length,o=o._parent,!o))return null}}get firstChild(){return this.nextChild(0,1,0,4)}get lastChild(){return this.nextChild(this._tree.children.length-1,-1,0,4)}childAfter(e){return this.nextChild(0,1,e,2)}childBefore(e){return this.nextChild(this._tree.children.length-1,-1,e,-2)}enter(e,t,i=0){let s;if(!(i&ee.IgnoreOverlays)&&(s=this._tree.prop(L.mounted))&&s.overlay){let r=e-this.from;for(let{from:o,to:l}of s.overlay)if((t>0?o<=r:o=r:l>r))return new Ne(s.tree,s.overlay[0].from+this.from,-1,this)}return this.nextChild(0,1,e,t,i)}nextSignificantParent(){let e=this;for(;e.type.isAnonymous&&e._parent;)e=e._parent;return e}get parent(){return this._parent?this._parent.nextSignificantParent():null}get nextSibling(){return this._parent&&this.index>=0?this._parent.nextChild(this.index+1,1,0,4):null}get prevSibling(){return this._parent&&this.index>=0?this._parent.nextChild(this.index-1,-1,0,4):null}cursor(e=0){return new _i(this,e)}get tree(){return this._tree}toTree(){return this._tree}resolve(e,t=0){return oi(this,e,t,!1)}resolveInner(e,t=0){return oi(this,e,t,!0)}enterUnfinishedNodesBefore(e){return Lh(this,e)}getChild(e,t=null,i=null){let s=Rn(this,e,t,i);return s.length?s[0]:null}getChildren(e,t=null,i=null){return Rn(this,e,t,i)}toString(){return this._tree.toString()}get node(){return this}matchContext(e){return 
Ln(this,e)}}function Rn(n,e,t,i){let s=n.cursor(),r=[];if(!s.firstChild())return r;if(t!=null){for(;!s.type.is(t);)if(!s.nextSibling())return r}for(;;){if(i!=null&&s.type.is(i))return r;if(s.type.is(e)&&r.push(s.node),!s.nextSibling())return i==null?r:[]}}function Ln(n,e,t=e.length-1){for(let i=n.parent;t>=0;i=i.parent){if(!i)return!1;if(!i.type.isAnonymous){if(e[t]&&e[t]!=i.name)return!1;t--}}return!0}class Hd{constructor(e,t,i,s){this.parent=e,this.buffer=t,this.index=i,this.start=s}}class Xe{get name(){return this.type.name}get from(){return this.context.start+this.context.buffer.buffer[this.index+1]}get to(){return this.context.start+this.context.buffer.buffer[this.index+2]}constructor(e,t,i){this.context=e,this._parent=t,this.index=i,this.type=e.buffer.set.types[e.buffer.buffer[i]]}child(e,t,i){let{buffer:s}=this.context,r=s.findChild(this.index+4,s.buffer[this.index+3],e,t-this.context.start,i);return r<0?null:new Xe(this.context,this,r)}get firstChild(){return this.child(1,0,4)}get lastChild(){return this.child(-1,0,4)}childAfter(e){return this.child(1,e,2)}childBefore(e){return this.child(-1,e,-2)}enter(e,t,i=0){if(i&ee.ExcludeBuffers)return null;let{buffer:s}=this.context,r=s.findChild(this.index+4,s.buffer[this.index+3],t>0?1:-1,e-this.context.start,t);return r<0?null:new Xe(this.context,this,r)}get parent(){return this._parent||this.context.parent.nextSignificantParent()}externalSibling(e){return this._parent?null:this.context.parent.nextChild(this.context.index+e,e,0,4)}get nextSibling(){let{buffer:e}=this.context,t=e.buffer[this.index+3];return t<(this._parent?e.buffer[this._parent.index+3]:e.buffer.length)?new Xe(this.context,this._parent,t):this.externalSibling(1)}get prevSibling(){let{buffer:e}=this.context,t=this._parent?this._parent.index+4:0;return this.index==t?this.externalSibling(-1):new Xe(this.context,this._parent,e.findChild(t,this.index,-1,0,4))}cursor(e=0){return new _i(this,e)}get tree(){return null}toTree(){let e=[],t=[],{buffer:i}=this.context,s=this.index+4,r=i.buffer[this.index+3];if(r>s){let o=i.buffer[this.index+1];e.push(i.slice(s,r,o)),t.push(0)}return new q(this.type,e,t,this.to-this.from)}resolve(e,t=0){return oi(this,e,t,!1)}resolveInner(e,t=0){return oi(this,e,t,!0)}enterUnfinishedNodesBefore(e){return Lh(this,e)}toString(){return this.context.buffer.childString(this.index)}getChild(e,t=null,i=null){let s=Rn(this,e,t,i);return s.length?s[0]:null}getChildren(e,t=null,i=null){return Rn(this,e,t,i)}get node(){return this}matchContext(e){return Ln(this,e)}}class _i{get name(){return this.type.name}constructor(e,t=0){if(this.mode=t,this.buffer=null,this.stack=[],this.index=0,this.bufferNode=null,e instanceof Ne)this.yieldNode(e);else{this._tree=e.context.parent,this.buffer=e.context;for(let i=e._parent;i;i=i._parent)this.stack.unshift(i.index);this.bufferNode=e,this.yieldBuf(e.index)}}yieldNode(e){return e?(this._tree=e,this.type=e.type,this.from=e.from,this.to=e.to,!0):!1}yieldBuf(e,t){this.index=e;let{start:i,buffer:s}=this.buffer;return this.type=t||s.set.types[s.buffer[e]],this.from=i+s.buffer[e+1],this.to=i+s.buffer[e+2],!0}yield(e){return e?e instanceof Ne?(this.buffer=null,this.yieldNode(e)):(this.buffer=e.context,this.yieldBuf(e.index,e.type)):!1}toString(){return this.buffer?this.buffer.buffer.childString(this.index):this._tree.toString()}enterChild(e,t,i){if(!this.buffer)return 
this.yield(this._tree.nextChild(e<0?this._tree._tree.children.length-1:0,e,t,i,this.mode));let{buffer:s}=this.buffer,r=s.findChild(this.index+4,s.buffer[this.index+3],e,t-this.buffer.start,i);return r<0?!1:(this.stack.push(this.index),this.yieldBuf(r))}firstChild(){return this.enterChild(1,0,4)}lastChild(){return this.enterChild(-1,0,4)}childAfter(e){return this.enterChild(1,e,2)}childBefore(e){return this.enterChild(-1,e,-2)}enter(e,t,i=this.mode){return this.buffer?i&ee.ExcludeBuffers?!1:this.enterChild(1,e,t):this.yield(this._tree.enter(e,t,i))}parent(){if(!this.buffer)return this.yieldNode(this.mode&ee.IncludeAnonymous?this._tree._parent:this._tree.parent);if(this.stack.length)return this.yieldBuf(this.stack.pop());let e=this.mode&ee.IncludeAnonymous?this.buffer.parent:this.buffer.parent.nextSignificantParent();return this.buffer=null,this.yieldNode(e)}sibling(e){if(!this.buffer)return this._tree._parent?this.yield(this._tree.index<0?null:this._tree._parent.nextChild(this._tree.index+e,e,0,4,this.mode)):!1;let{buffer:t}=this.buffer,i=this.stack.length-1;if(e<0){let s=i<0?0:this.stack[i]+4;if(this.index!=s)return this.yieldBuf(t.findChild(s,this.index,-1,0,4))}else{let s=t.buffer[this.index+3];if(s<(i<0?t.buffer.length:t.buffer[this.stack[i]+3]))return this.yieldBuf(s)}return i<0?this.yield(this.buffer.parent.nextChild(this.buffer.index+e,e,0,4,this.mode)):!1}nextSibling(){return this.sibling(1)}prevSibling(){return this.sibling(-1)}atLastNode(e){let t,i,{buffer:s}=this;if(s){if(e>0){if(this.index-1)for(let r=t+e,o=e<0?-1:i._tree.children.length;r!=o;r+=e){let l=i._tree.children[r];if(this.mode&ee.IncludeAnonymous||l instanceof zt||!l.type.isAnonymous||Hr(l))return!1}return!0}move(e,t){if(t&&this.enterChild(e,0,4))return!0;for(;;){if(this.sibling(e))return!0;if(this.atLastNode(e)||!this.parent())return!1}}next(e=!0){return this.move(1,e)}prev(e=!0){return this.move(-1,e)}moveTo(e,t=0){for(;(this.from==this.to||(t<1?this.from>=e:this.from>e)||(t>-1?this.to<=e:this.to=0;){for(let o=e;o;o=o._parent)if(o.index==s){if(s==this.index)return o;t=o,i=r+1;break e}s=this.stack[--r]}for(let s=i;s=0;r--){if(r<0)return Ln(this.node,e,s);let o=i[t.buffer[this.stack[r]]];if(!o.isAnonymous){if(e[s]&&e[s]!=o.name)return!1;s--}}return!0}}function Hr(n){return n.children.some(e=>e instanceof zt||!e.type.isAnonymous||Hr(e))}function Wd(n){var e;let{buffer:t,nodeSet:i,maxBufferLength:s=_d,reused:r=[],minRepeatType:o=i.types.length}=n,l=Array.isArray(t)?new Fr(t,t.length):t,a=i.types,h=0,c=0;function f(C,A,D,v,U){let{id:I,start:P,end:V,size:G}=l,$=c;for(;G<0;)if(l.next(),G==-1){let Y=r[I];D.push(Y),v.push(P-C);return}else if(G==-3){h=I;return}else if(G==-4){c=I;return}else throw new RangeError(`Unrecognized record size: ${G}`);let T=a[I],J,X,ce=P-C;if(V-P<=s&&(X=g(l.pos-A,U))){let Y=new Uint16Array(X.size-X.skip),ie=l.pos-X.size,nt=Y.length;for(;l.pos>ie;)nt=b(X.start,Y,nt);J=new zt(Y,V-X.start,i),ce=X.start-C}else{let Y=l.pos-G;l.next();let ie=[],nt=[],Ct=I>=o?I:-1,qt=0,Gi=V;for(;l.pos>Y;)Ct>=0&&l.id==Ct&&l.size>=0?(l.end<=Gi-s&&(d(ie,nt,P,qt,l.end,Gi,Ct,$),qt=ie.length,Gi=l.end),l.next()):f(P,Y,ie,nt,Ct);if(Ct>=0&&qt>0&&qt-1&&qt>0){let ro=u(T);J=Wr(T,ie,nt,0,ie.length,0,V-P,ro,ro)}else J=p(T,ie,nt,V-P,$-V)}D.push(J),v.push(ce)}function u(C){return(A,D,v)=>{let U=0,I=A.length-1,P,V;if(I>=0&&(P=A[I])instanceof q){if(!I&&P.type==C&&P.length==v)return P;(V=P.prop(L.lookAhead))&&(U=D[I]+P.length+V)}return p(C,A,D,v,U)}}function d(C,A,D,v,U,I,P,V){let 
G=[],$=[];for(;C.length>v;)G.push(C.pop()),$.push(A.pop()+D-U);C.push(p(i.types[P],G,$,I-U,V-I)),A.push(U-D)}function p(C,A,D,v,U=0,I){if(h){let P=[L.contextHash,h];I=I?[P].concat(I):[P]}if(U>25){let P=[L.lookAhead,U];I=I?[P].concat(I):[P]}return new q(C,A,D,v,I)}function g(C,A){let D=l.fork(),v=0,U=0,I=0,P=D.end-s,V={size:0,start:0,skip:0};e:for(let G=D.pos-C;D.pos>G;){let $=D.size;if(D.id==A&&$>=0){V.size=v,V.start=U,V.skip=I,I+=4,v+=4,D.next();continue}let T=D.pos-$;if($<0||T=o?4:0,X=D.start;for(D.next();D.pos>T;){if(D.size<0)if(D.size==-3)J+=4;else break e;else D.id>=o&&(J+=4);D.next()}U=X,v+=$,I+=J}return(A<0||v==C)&&(V.size=v,V.start=U,V.skip=I),V.size>4?V:void 0}function b(C,A,D){let{id:v,start:U,end:I,size:P}=l;if(l.next(),P>=0&&v4){let G=l.pos-(P-4);for(;l.pos>G;)D=b(C,A,D)}A[--D]=V,A[--D]=I-C,A[--D]=U-C,A[--D]=v}else P==-3?h=v:P==-4&&(c=v);return D}let w=[],y=[];for(;l.pos>0;)f(n.start||0,n.bufferStart||0,w,y,-1);let S=(e=n.length)!==null&&e!==void 0?e:w.length?y[0]+w[0].length:0;return new q(a[n.topID],w.reverse(),y.reverse(),S)}const hl=new WeakMap;function kn(n,e){if(!n.isAnonymous||e instanceof zt||e.type!=n)return 1;let t=hl.get(e);if(t==null){t=1;for(let i of e.children){if(i.type!=n||!(i instanceof q)){t=1;break}t+=kn(n,i)}hl.set(e,t)}return t}function Wr(n,e,t,i,s,r,o,l,a){let h=0;for(let p=i;p=c)break;D+=v}if(S==C+1){if(D>c){let v=p[C];d(v.children,v.positions,0,v.children.length,g[C]+y);continue}f.push(p[C])}else{let v=g[S-1]+p[S-1].length-A;f.push(Wr(n,p,g,C,S,A,v,null,a))}u.push(A+y-r)}}return d(e,t,i,s,0),(l||a)(f,u,o)}class lb{constructor(){this.map=new WeakMap}setBuffer(e,t,i){let s=this.map.get(e);s||this.map.set(e,s=new Map),s.set(t,i)}getBuffer(e,t){let i=this.map.get(e);return i&&i.get(t)}set(e,t){e instanceof Xe?this.setBuffer(e.context.buffer,e.index,t):e instanceof Ne&&this.map.set(e.tree,t)}get(e){return e instanceof Xe?this.getBuffer(e.context.buffer,e.index):e instanceof Ne?this.map.get(e.tree):void 0}cursorSet(e,t){e.buffer?this.setBuffer(e.buffer.buffer,e.index,t):this.map.set(e.tree,t)}cursorGet(e){return e.buffer?this.getBuffer(e.buffer.buffer,e.index):this.map.get(e.tree)}}class rt{constructor(e,t,i,s,r=!1,o=!1){this.from=e,this.to=t,this.tree=i,this.offset=s,this.open=(r?1:0)|(o?2:0)}get openStart(){return(this.open&1)>0}get openEnd(){return(this.open&2)>0}static addTree(e,t=[],i=!1){let s=[new rt(0,e.length,e,0,!1,i)];for(let r of t)r.to>e.length&&s.push(r);return s}static applyChanges(e,t,i=128){if(!t.length)return e;let s=[],r=1,o=e.length?e[0]:null;for(let l=0,a=0,h=0;;l++){let c=l=i)for(;o&&o.from=u.from||f<=u.to||h){let d=Math.max(u.from,a)-h,p=Math.min(u.to,f)-h;u=d>=p?null:new rt(d,p,u.tree,u.offset+h,l>0,!!c)}if(u&&s.push(u),o.to>f)break;o=rnew Le(s.from,s.to)):[new Le(0,0)]:[new Le(0,e.length)],this.createParse(e,t||[],i)}parse(e,t,i){let s=this.startParse(e,t,i);for(;;){let r=s.advance();if(r)return r}}}class zd{constructor(e){this.string=e}get length(){return this.string.length}chunk(e){return this.string.slice(e)}get lineChunks(){return!1}read(e,t){return this.string.slice(e,t)}}function ab(n){return(e,t,i,s)=>new jd(e,n,t,i,s)}class cl{constructor(e,t,i,s,r){this.parser=e,this.parse=t,this.overlay=i,this.target=s,this.ranges=r}}class qd{constructor(e,t,i,s,r,o,l){this.parser=e,this.predicate=t,this.mounts=i,this.index=s,this.start=r,this.target=o,this.prev=l,this.depth=0,this.ranges=[]}}const pr=new L({perNode:!0});class 
jd{constructor(e,t,i,s,r){this.nest=t,this.input=i,this.fragments=s,this.ranges=r,this.inner=[],this.innerDone=0,this.baseTree=null,this.stoppedAt=null,this.baseParse=e}advance(){if(this.baseParse){let i=this.baseParse.advance();if(!i)return null;if(this.baseParse=null,this.baseTree=i,this.startInner(),this.stoppedAt!=null)for(let s of this.inner)s.parse.stopAt(this.stoppedAt)}if(this.innerDone==this.inner.length){let i=this.baseTree;return this.stoppedAt!=null&&(i=new q(i.type,i.children,i.positions,i.length,i.propValues.concat([[pr,this.stoppedAt]]))),i}let e=this.inner[this.innerDone],t=e.parse.advance();if(t){this.innerDone++;let i=Object.assign(Object.create(null),e.target.props);i[L.mounted.id]=new Vd(t,e.overlay,e.parser),e.target.props=i}return null}get parsedPos(){if(this.baseParse)return 0;let e=this.input.length;for(let t=this.innerDone;tc.frag.from<=s.from&&c.frag.to>=s.to&&c.mount.overlay);if(h)for(let c of h.mount.overlay){let f=c.from+h.pos,u=c.to+h.pos;f>=s.from&&u<=s.to&&!t.ranges.some(d=>d.fromf)&&t.ranges.push({from:f,to:u})}}l=!1}else if(i&&(o=Kd(i.ranges,s.from,s.to)))l=o!=2;else if(!s.type.isAnonymous&&s.fromnew Le(f.from-s.from,f.to-s.from)):null,s.tree,c)),r.overlay?c.length&&(i={ranges:c,depth:0,prev:i}):l=!1}}else t&&(a=t.predicate(s))&&(a===!0&&(a=new Le(s.from,s.to)),a.fromnew Le(c.from-t.start,c.to-t.start)),t.target,h)),t=t.prev}i&&!--i.depth&&(i=i.prev)}}}}function Kd(n,e,t){for(let i of n){if(i.from>=t)break;if(i.to>e)return i.from<=e&&i.to>=t?2:1}return 0}function fl(n,e,t,i,s,r){if(e=e.to);i++);let o=s.children[i],l=o.buffer;function a(h,c,f,u,d){let p=h;for(;l[p+2]+r<=e.from;)p=l[p+3];let g=[],b=[];fl(o,h,p,g,b,u);let w=l[p+1],y=l[p+2],S=w+r==e.from&&y+r==e.to&&l[p]==e.type.id;return g.push(S?e.toTree():a(p+4,l[p+3],o.set.types[l[p]],w,y-w)),b.push(w-u),fl(o,l[p+3],c,g,b,u),new q(f,g,b,d)}s.children[i]=a(0,l.length,xe.none,0,o.length);for(let h=0;h<=t;h++)n.childAfter(e.from)}class ul{constructor(e,t){this.offset=t,this.done=!1,this.cursor=e.cursor(ee.IncludeAnonymous|ee.IgnoreMounts)}moveTo(e){let{cursor:t}=this,i=e-this.offset;for(;!this.done&&t.from=e&&t.enter(i,1,ee.IgnoreOverlays|ee.ExcludeBuffers)||t.next(!1)||(this.done=!0)}hasNode(e){if(this.moveTo(e.from),!this.done&&this.cursor.from+this.offset==e.from&&this.cursor.tree)for(let t=this.cursor.tree;;){if(t==e.tree)return!0;if(t.children.length&&t.positions[0]==0&&t.children[0]instanceof q)t=t.children[0];else break}return!1}}class Gd{constructor(e){var t;if(this.fragments=e,this.curTo=0,this.fragI=0,e.length){let i=this.curFrag=e[0];this.curTo=(t=i.tree.prop(pr))!==null&&t!==void 0?t:i.to,this.inner=new ul(i.tree,-i.offset)}else this.curFrag=this.inner=null}hasNode(e){for(;this.curFrag&&e.from>=this.curTo;)this.nextFrag();return this.curFrag&&this.curFrag.from<=e.from&&this.curTo>=e.to&&this.inner.hasNode(e)}nextFrag(){var e;if(this.fragI++,this.fragI==this.fragments.length)this.curFrag=this.inner=null;else{let t=this.curFrag=this.fragments[this.fragI];this.curTo=(e=t.tree.prop(pr))!==null&&e!==void 0?e:t.to,this.inner=new ul(t.tree,-t.offset)}}findMounts(e,t){var i;let s=[];if(this.inner){this.inner.cursor.moveTo(e,1);for(let r=this.inner.cursor.node;r;r=r.parent){let o=(i=r.tree)===null||i===void 0?void 0:i.prop(L.mounted);if(o&&o.parser==t)for(let l=this.fragI;l=r.to)break;a.tree==this.curFrag.tree&&s.push({frag:a,pos:r.from-a.offset,mount:o})}}}return s}}function dl(n,e){let t=null,i=e;for(let s=1,r=0;s=l)break;a.to<=o||(t||(i=t=e.slice()),a.froml&&t.splice(r+1,0,new 
Le(l,a.to))):a.to>l?t[r--]=new Le(l,a.to):t.splice(r--,1))}}return i}function Jd(n,e,t,i){let s=0,r=0,o=!1,l=!1,a=-1e9,h=[];for(;;){let c=s==n.length?1e9:o?n[s].to:n[s].from,f=r==e.length?1e9:l?e[r].to:e[r].from;if(o!=l){let u=Math.max(a,t),d=Math.min(c,f,i);unew Le(u.from+i,u.to+i)),f=Jd(e,c,a,h);for(let u=0,d=a;;u++){let p=u==f.length,g=p?h:f[u].from;if(g>d&&t.push(new rt(d,g,s.tree,-o,r.from>=d||r.openStart,r.to<=g||r.openEnd)),p)break;d=f[u].to}}else t.push(new rt(a,h,s.tree,-o,r.from>=o||r.openStart,r.to<=l||r.openEnd))}return t}let Yd=0;class Ge{constructor(e,t,i){this.set=e,this.base=t,this.modified=i,this.id=Yd++}static define(e){if(e?.base)throw new Error("Can not derive from a modified tag");let t=new Ge([],null,[]);if(t.set.push(t),e)for(let i of e.set)t.set.push(i);return t}static defineModifier(){let e=new In;return t=>t.modified.indexOf(e)>-1?t:In.get(t.base||t,t.modified.concat(e).sort((i,s)=>i.id-s.id))}}let Xd=0;class In{constructor(){this.instances=[],this.id=Xd++}static get(e,t){if(!t.length)return e;let i=t[0].instances.find(l=>l.base==e&&Zd(t,l.modified));if(i)return i;let s=[],r=new Ge(s,e,t);for(let l of t)l.instances.push(r);let o=Qd(t);for(let l of e.set)if(!l.modified.length)for(let a of o)s.push(In.get(l,a));return r}}function Zd(n,e){return n.length==e.length&&n.every((t,i)=>t==e[i])}function Qd(n){let e=[[]];for(let t=0;ti.length-t.length)}function $d(n){let e=Object.create(null);for(let t in n){let i=n[t];Array.isArray(i)||(i=[i]);for(let s of t.split(" "))if(s){let r=[],o=2,l=s;for(let f=0;;){if(l=="..."&&f>0&&f+3==s.length){o=1;break}let u=/^"(?:[^"\\]|\\.)*?"|[^\/!]+/.exec(l);if(!u)throw new RangeError("Invalid path: "+s);if(r.push(u[0]=="*"?"":u[0][0]=='"'?JSON.parse(u[0]):u[0]),f+=u[0].length,f==s.length)break;let d=s[f++];if(f==s.length&&d=="!"){o=0;break}if(d!="/")throw new RangeError("Invalid path: "+s);l=s.slice(f)}let a=r.length-1,h=r[a];if(!h)throw new RangeError("Invalid path: "+s);let c=new _n(i,o,a>0?r.slice(0,a):null);e[h]=c.sort(e[h])}}return _h.add(e)}const _h=new L;class _n{constructor(e,t,i,s){this.tags=e,this.mode=t,this.context=i,this.next=s}get opaque(){return this.mode==0}get inherit(){return this.mode==1}sort(e){return!e||e.depth{let o=s;for(let l of r)for(let a of l.set){let h=t[a.id];if(h){o=o?o+" "+h:h;break}}return o},scope:i}}function ep(n,e){let t=null;for(let i of n){let s=i.style(e);s&&(t=t?t+" "+s:s)}return t}function tp(n,e,t,i=0,s=n.length){let r=new ip(i,Array.isArray(e)?e:[e],t);r.highlightRange(n.cursor(),i,s,"",r.highlighters),r.flush(s)}class ip{constructor(e,t,i){this.at=e,this.highlighters=t,this.span=i,this.class=""}startSpan(e,t){t!=this.class&&(this.flush(e),e>this.at&&(this.at=e),this.class=t)}flush(e){e>this.at&&this.class&&this.span(this.at,e,this.class)}highlightRange(e,t,i,s,r){let{type:o,from:l,to:a}=e;if(l>=i||a<=t)return;o.isTop&&(r=this.highlighters.filter(d=>!d.scope||d.scope(o)));let h=s,c=np(e)||_n.empty,f=ep(r,c.tags);if(f&&(h&&(h+=" "),h+=f,c.mode==1&&(s+=(s?" 
":"")+f)),this.startSpan(e.from,h),c.opaque)return;let u=e.tree&&e.tree.prop(L.mounted);if(u&&u.overlay){let d=e.node.enter(u.overlay[0].from+l,1),p=this.highlighters.filter(b=>!b.scope||b.scope(u.tree.type)),g=e.firstChild();for(let b=0,w=l;;b++){let y=b=S||!e.nextSibling())););if(!y||S>i)break;w=y.to+l,w>t&&(this.highlightRange(d.cursor(),Math.max(t,y.from+l),Math.min(i,w),s,p),this.startSpan(w,h))}g&&e.parent()}else if(e.firstChild()){do if(!(e.to<=t)){if(e.from>=i)break;this.highlightRange(e,t,i,s,r),this.startSpan(Math.min(i,e.to),h)}while(e.nextSibling());e.parent()}}}function np(n){let e=n.type.prop(_h);for(;e&&e.context&&!n.matchContext(e.context);)e=e.next;return e||null}const x=Ge.define,ln=x(),ot=x(),ml=x(ot),gl=x(ot),lt=x(),an=x(lt),ys=x(lt),Ke=x(),At=x(Ke),qe=x(),je=x(),mr=x(),mi=x(mr),hn=x(),m={comment:ln,lineComment:x(ln),blockComment:x(ln),docComment:x(ln),name:ot,variableName:x(ot),typeName:ml,tagName:x(ml),propertyName:gl,attributeName:x(gl),className:x(ot),labelName:x(ot),namespace:x(ot),macroName:x(ot),literal:lt,string:an,docString:x(an),character:x(an),attributeValue:x(an),number:ys,integer:x(ys),float:x(ys),bool:x(lt),regexp:x(lt),escape:x(lt),color:x(lt),url:x(lt),keyword:qe,self:x(qe),null:x(qe),atom:x(qe),unit:x(qe),modifier:x(qe),operatorKeyword:x(qe),controlKeyword:x(qe),definitionKeyword:x(qe),moduleKeyword:x(qe),operator:je,derefOperator:x(je),arithmeticOperator:x(je),logicOperator:x(je),bitwiseOperator:x(je),compareOperator:x(je),updateOperator:x(je),definitionOperator:x(je),typeOperator:x(je),controlOperator:x(je),punctuation:mr,separator:x(mr),bracket:mi,angleBracket:x(mi),squareBracket:x(mi),paren:x(mi),brace:x(mi),content:Ke,heading:At,heading1:x(At),heading2:x(At),heading3:x(At),heading4:x(At),heading5:x(At),heading6:x(At),contentSeparator:x(Ke),list:x(Ke),quote:x(Ke),emphasis:x(Ke),strong:x(Ke),link:x(Ke),monospace:x(Ke),strikethrough:x(Ke),inserted:x(),deleted:x(),changed:x(),invalid:x(),meta:hn,documentMeta:x(hn),annotation:x(hn),processingInstruction:x(hn),definition:Ge.defineModifier(),constant:Ge.defineModifier(),function:Ge.defineModifier(),standard:Ge.defineModifier(),local:Ge.defineModifier(),special:Ge.defineModifier()};Nh([{tag:m.link,class:"tok-link"},{tag:m.heading,class:"tok-heading"},{tag:m.emphasis,class:"tok-emphasis"},{tag:m.strong,class:"tok-strong"},{tag:m.keyword,class:"tok-keyword"},{tag:m.atom,class:"tok-atom"},{tag:m.bool,class:"tok-bool"},{tag:m.url,class:"tok-url"},{tag:m.labelName,class:"tok-labelName"},{tag:m.inserted,class:"tok-inserted"},{tag:m.deleted,class:"tok-deleted"},{tag:m.literal,class:"tok-literal"},{tag:m.string,class:"tok-string"},{tag:m.number,class:"tok-number"},{tag:[m.regexp,m.escape,m.special(m.string)],class:"tok-string2"},{tag:m.variableName,class:"tok-variableName"},{tag:m.local(m.variableName),class:"tok-variableName tok-local"},{tag:m.definition(m.variableName),class:"tok-variableName tok-definition"},{tag:m.special(m.variableName),class:"tok-variableName2"},{tag:m.definition(m.propertyName),class:"tok-propertyName tok-definition"},{tag:m.typeName,class:"tok-typeName"},{tag:m.namespace,class:"tok-namespace"},{tag:m.className,class:"tok-className"},{tag:m.macroName,class:"tok-macroName"},{tag:m.propertyName,class:"tok-propertyName"},{tag:m.operator,class:"tok-operator"},{tag:m.comment,class:"tok-comment"},{tag:m.meta,class:"tok-meta"},{tag:m.invalid,class:"tok-invalid"},{tag:m.punctuation,class:"tok-punctuation"}]);var ws;const Bt=new L;function Vh(n){return O.define({combine:n?e=>e.concat(n):void 
0})}const sp=new L;class Ie{constructor(e,t,i=[],s=""){this.data=e,this.name=s,_.prototype.hasOwnProperty("tree")||Object.defineProperty(_.prototype,"tree",{get(){return pe(this)}}),this.parser=t,this.extension=[wt.of(this),_.languageData.of((r,o,l)=>{let a=bl(r,o,l),h=a.type.prop(Bt);if(!h)return[];let c=r.facet(h),f=a.type.prop(sp);if(f){let u=a.resolve(o-a.from,l);for(let d of f)if(d.test(u,r)){let p=r.facet(d.facet);return d.type=="replace"?p:p.concat(c)}}return c})].concat(i)}isActiveAt(e,t,i=-1){return bl(e,t,i).type.prop(Bt)==this.data}findRegions(e){let t=e.facet(wt);if(t?.data==this.data)return[{from:0,to:e.doc.length}];if(!t||!t.allowsNesting)return[];let i=[],s=(r,o)=>{if(r.prop(Bt)==this.data){i.push({from:o,to:o+r.length});return}let l=r.prop(L.mounted);if(l){if(l.tree.prop(Bt)==this.data){if(l.overlay)for(let a of l.overlay)i.push({from:a.from+o,to:a.to+o});else i.push({from:o,to:o+r.length});return}else if(l.overlay){let a=i.length;if(s(l.tree,l.overlay[0].from+o),i.length>a)return}}for(let a=0;ai.isTop?t:void 0)]}),e.name)}configure(e,t){return new gr(this.data,this.parser.configure(e),t||this.name)}get allowsNesting(){return this.parser.hasWrappers()}}function pe(n){let e=n.field(Ie.state,!1);return e?e.tree:q.empty}class rp{constructor(e){this.doc=e,this.cursorPos=0,this.string="",this.cursor=e.iter()}get length(){return this.doc.length}syncTo(e){return this.string=this.cursor.next(e-this.cursorPos).value,this.cursorPos=e+this.string.length,this.cursorPos-this.string.length}chunk(e){return this.syncTo(e),this.string}get lineChunks(){return!0}read(e,t){let i=this.cursorPos-this.string.length;return e=this.cursorPos?this.doc.sliceString(e,t):this.string.slice(e-i,t-i)}}let gi=null;class li{constructor(e,t,i=[],s,r,o,l,a){this.parser=e,this.state=t,this.fragments=i,this.tree=s,this.treeLen=r,this.viewport=o,this.skipped=l,this.scheduleOn=a,this.parse=null,this.tempSkipped=[]}static create(e,t,i){return new li(e,t,[],q.empty,0,i,[],null)}startParse(){return this.parser.startParse(new rp(this.state.doc),this.fragments)}work(e,t){return t!=null&&t>=this.state.doc.length&&(t=void 0),this.tree!=q.empty&&this.isDone(t??this.state.doc.length)?(this.takeTree(),!0):this.withContext(()=>{var i;if(typeof e=="number"){let s=Date.now()+e;e=()=>Date.now()>s}for(this.parse||(this.parse=this.startParse()),t!=null&&(this.parse.stoppedAt==null||this.parse.stoppedAt>t)&&t=this.treeLen&&((this.parse.stoppedAt==null||this.parse.stoppedAt>e)&&this.parse.stopAt(e),this.withContext(()=>{for(;!(t=this.parse.advance()););}),this.treeLen=e,this.tree=t,this.fragments=this.withoutTempSkipped(rt.addTree(this.tree,this.fragments,!0)),this.parse=null)}withContext(e){let t=gi;gi=this;try{return e()}finally{gi=t}}withoutTempSkipped(e){for(let t;t=this.tempSkipped.pop();)e=yl(e,t.from,t.to);return e}changes(e,t){let{fragments:i,tree:s,treeLen:r,viewport:o,skipped:l}=this;if(this.takeTree(),!e.empty){let a=[];if(e.iterChangedRanges((h,c,f,u)=>a.push({fromA:h,toA:c,fromB:f,toB:u})),i=rt.applyChanges(i,a),s=q.empty,r=0,o={from:e.mapPos(o.from,-1),to:e.mapPos(o.to,1)},this.skipped.length){l=[];for(let h of this.skipped){let c=e.mapPos(h.from,1),f=e.mapPos(h.to,-1);ce.from&&(this.fragments=yl(this.fragments,s,r),this.skipped.splice(i--,1))}return this.skipped.length>=t?!1:(this.reset(),!0)}reset(){this.parse&&(this.takeTree(),this.parse=null)}skipUntilInView(e,t){this.skipped.push({from:e,to:t})}static getSkippingParser(e){return new class extends Ih{createParse(t,i,s){let 
r=s[0].from,o=s[s.length-1].to;return{parsedPos:r,advance(){let a=gi;if(a){for(let h of s)a.tempSkipped.push(h);e&&(a.scheduleOn=a.scheduleOn?Promise.all([a.scheduleOn,e]):e)}return this.parsedPos=o,new q(xe.none,[],[],o-r)},stoppedAt:null,stopAt(){}}}}}isDone(e){e=Math.min(e,this.state.doc.length);let t=this.fragments;return this.treeLen>=e&&t.length&&t[0].from==0&&t[0].to>=e}static get(){return gi}}function yl(n,e,t){return rt.applyChanges(n,[{fromA:e,toA:t,fromB:e,toB:t}])}class ai{constructor(e){this.context=e,this.tree=e.tree}apply(e){if(!e.docChanged&&this.tree==this.context.tree)return this;let t=this.context.changes(e.changes,e.state),i=this.context.treeLen==e.startState.doc.length?void 0:Math.max(e.changes.mapPos(this.context.treeLen),t.viewport.to);return t.work(20,i)||t.takeTree(),new ai(t)}static init(e){let t=Math.min(3e3,e.doc.length),i=li.create(e.facet(wt).parser,e,{from:0,to:t});return i.work(20,t)||i.takeTree(),new ai(i)}}Ie.state=Me.define({create:ai.init,update(n,e){for(let t of e.effects)if(t.is(Ie.setState))return t.value;return e.startState.facet(wt)!=e.state.facet(wt)?ai.init(e.state):n.apply(e)}});let Fh=n=>{let e=setTimeout(()=>n(),500);return()=>clearTimeout(e)};typeof requestIdleCallback<"u"&&(Fh=n=>{let e=-1,t=setTimeout(()=>{e=requestIdleCallback(n,{timeout:500-100})},100);return()=>e<0?clearTimeout(t):cancelIdleCallback(e)});const ks=typeof navigator<"u"&&(!((ws=navigator.scheduling)===null||ws===void 0)&&ws.isInputPending)?()=>navigator.scheduling.isInputPending():null,op=ye.fromClass(class{constructor(e){this.view=e,this.working=null,this.workScheduled=0,this.chunkEnd=-1,this.chunkBudget=-1,this.work=this.work.bind(this),this.scheduleWork()}update(e){let t=this.view.state.field(Ie.state).context;(t.updateViewport(e.view.viewport)||this.view.viewport.to>t.treeLen)&&this.scheduleWork(),e.docChanged&&(this.view.hasFocus&&(this.chunkBudget+=50),this.scheduleWork()),this.checkAsyncSchedule(t)}scheduleWork(){if(this.working)return;let{state:e}=this.view,t=e.field(Ie.state);(t.tree!=t.context.tree||!t.context.isDone(e.doc.length))&&(this.working=Fh(this.work))}work(e){this.working=null;let t=Date.now();if(this.chunkEnds+1e3,a=r.context.work(()=>ks&&ks()||Date.now()>o,s+(l?0:1e5));this.chunkBudget-=Date.now()-t,(a||this.chunkBudget<=0)&&(r.context.takeTree(),this.view.dispatch({effects:Ie.setState.of(new ai(r.context))})),this.chunkBudget>0&&!(a&&!l)&&this.scheduleWork(),this.checkAsyncSchedule(r.context)}checkAsyncSchedule(e){e.scheduleOn&&(this.workScheduled++,e.scheduleOn.then(()=>this.scheduleWork()).catch(t=>He(this.view.state,t)).then(()=>this.workScheduled--),e.scheduleOn=null)}destroy(){this.working&&this.working()}isWorking(){return!!(this.working||this.workScheduled>0)}},{eventHandlers:{focus(){this.scheduleWork()}}}),wt=O.define({combine(n){return n.length?n[0]:null},enables:n=>[Ie.state,op,B.contentAttributes.compute([n],e=>{let t=e.facet(n);return t&&t.name?{"data-language":t.name}:{}})]});class cb{constructor(e,t=[]){this.language=e,this.support=t,this.extension=[e,t]}}class Hh{constructor(e,t,i,s,r,o=void 0){this.name=e,this.alias=t,this.extensions=i,this.filename=s,this.loadFunc=r,this.support=o,this.loading=null}load(){return this.loading||(this.loading=this.loadFunc().then(e=>this.support=e,e=>{throw this.loading=null,e}))}static of(e){let{load:t,support:i}=e;if(!t){if(!i)throw new RangeError("Must pass either 'load' or 'support' to LanguageDescription.of");t=()=>Promise.resolve(i)}return new 
Hh(e.name,(e.alias||[]).concat(e.name).map(s=>s.toLowerCase()),e.extensions||[],e.filename,t,i)}static matchFilename(e,t){for(let s of e)if(s.filename&&s.filename.test(t))return s;let i=/\.([^.]+)$/.exec(t);if(i){for(let s of e)if(s.extensions.indexOf(i[1])>-1)return s}return null}static matchLanguageName(e,t,i=!0){t=t.toLowerCase();for(let s of e)if(s.alias.some(r=>r==t))return s;if(i)for(let s of e)for(let r of s.alias){let o=t.indexOf(r);if(o>-1&&(r.length>2||!/\w/.test(t[o-1])&&!/\w/.test(t[o+r.length])))return s}return null}}const Wh=O.define(),Jn=O.define({combine:n=>{if(!n.length)return" ";let e=n[0];if(!e||/\S/.test(e)||Array.from(e).some(t=>t!=e[0]))throw new Error("Invalid indent unit: "+JSON.stringify(n[0]));return e}});function Nt(n){let e=n.facet(Jn);return e.charCodeAt(0)==9?n.tabSize*e.length:e.length}function Ni(n,e){let t="",i=n.tabSize,s=n.facet(Jn)[0];if(s==" "){for(;e>=i;)t+=" ",e-=i;s=" "}for(let r=0;r=i.from&&s<=i.to?r&&s==e?{text:"",from:e}:(t<0?s-1&&(r+=o-this.countColumn(i,i.search(/\S|$/))),r}countColumn(e,t=e.length){return zi(e,this.state.tabSize,t)}lineIndent(e,t=1){let{text:i,from:s}=this.lineAt(e,t),r=this.options.overrideIndentation;if(r){let o=r(s);if(o>-1)return o}return this.countColumn(i,i.search(/\S|$/))}get simulatedBreak(){return this.options.simulateBreak||null}}const lp=new L;function ap(n,e,t){return zh(e.resolveInner(t).enterUnfinishedNodesBefore(t),t,n)}function hp(n){return n.pos==n.options.simulateBreak&&n.options.simulateDoubleBreak}function cp(n){let e=n.type.prop(lp);if(e)return e;let t=n.firstChild,i;if(t&&(i=t.type.prop(L.closedBy))){let s=n.lastChild,r=s&&i.indexOf(s.name)>-1;return o=>qh(o,!0,1,void 0,r&&!hp(o)?s.from:void 0)}return n.parent==null?fp:null}function zh(n,e,t){for(;n;n=n.parent){let i=cp(n);if(i)return i(qr.create(t,e,n))}return null}function fp(){return 0}class qr extends Yn{constructor(e,t,i){super(e.state,e.options),this.base=e,this.pos=t,this.node=i}static create(e,t,i){return new qr(e,t,i)}get textAfter(){return this.textAfterPos(this.pos)}get baseIndent(){let e=this.state.doc.lineAt(this.node.from);for(;;){let t=this.node.resolve(e.from);for(;t.parent&&t.parent.from==t.from;)t=t.parent;if(up(t,this.node))break;e=this.state.doc.lineAt(t.from)}return this.lineIndent(e.from)}continue(){let e=this.node.parent;return e?zh(e,this.pos,this.base):0}}function up(n,e){for(let t=e;t;t=t.parent)if(n==t)return!0;return!1}function dp(n){let e=n.node,t=e.childAfter(e.from),i=e.lastChild;if(!t)return null;let s=n.options.simulateBreak,r=n.state.doc.lineAt(t.from),o=s==null||s<=r.from?r.to:Math.min(r.to,s);for(let l=t.to;;){let a=e.childAfter(l);if(!a||a==i)return null;if(!a.type.isSkipped)return a.fromqh(i,e,t,n)}function qh(n,e,t,i,s){let r=n.textAfter,o=r.match(/^\s*/)[0].length,l=i&&r.slice(o,o+i.length)==i||s==n.pos+o,a=e?dp(n):null;return a?l?n.column(a.from):n.column(a.to):n.baseIndent+(l?0:n.unit*t)}const ub=n=>n.baseIndent;function db({except:n,units:e=1}={}){return t=>{let i=n&&n.test(t.textAfter);return t.baseIndent+(i?0:e*t.unit)}}const pp=200;function mp(){return _.transactionFilter.of(n=>{if(!n.docChanged||!n.isUserEvent("input.type")&&!n.isUserEvent("input.complete"))return n;let e=n.startState.languageDataAt("indentOnInput",n.startState.selection.main.head);if(!e.length)return n;let t=n.newDoc,{head:i}=n.newSelection.main,s=t.lineAt(i);if(i>s.from+pp)return n;let r=t.sliceString(s.from,i);if(!e.some(h=>h.test(r)))return n;let{state:o}=n,l=-1,a=[];for(let{head:h}of o.selection.ranges){let 
c=o.doc.lineAt(h);if(c.from==l)continue;l=c.from;let f=zr(o,c.from);if(f==null)continue;let u=/^\s*/.exec(c.text)[0],d=Ni(o,f);u!=d&&a.push({from:c.from,to:c.from+u.length,insert:d})}return a.length?[n,{changes:a,sequential:!0}]:n})}const gp=O.define(),bp=new L;function pb(n){let e=n.firstChild,t=n.lastChild;return e&&e.tot)continue;if(r&&o.from=e&&a.to>t&&(r=a)}}return r}function wp(n){let e=n.lastChild;return e&&e.to==n.to&&e.type.isError}function Nn(n,e,t){for(let i of n.facet(gp)){let s=i(n,e,t);if(s)return s}return yp(n,e,t)}function jh(n,e){let t=e.mapPos(n.from,1),i=e.mapPos(n.to,-1);return t>=i?void 0:{from:t,to:i}}const Xn=R.define({map:jh}),ji=R.define({map:jh});function Kh(n){let e=[];for(let{head:t}of n.state.selection.ranges)e.some(i=>i.from<=t&&i.to>=t)||e.push(n.lineBlockAt(t));return e}const Vt=Me.define({create(){return E.none},update(n,e){n=n.map(e.changes);for(let t of e.effects)t.is(Xn)&&!kp(n,t.value.from,t.value.to)?n=n.update({add:[wl.range(t.value.from,t.value.to)]}):t.is(ji)&&(n=n.update({filter:(i,s)=>t.value.from!=i||t.value.to!=s,filterFrom:t.value.from,filterTo:t.value.to}));if(e.selection){let t=!1,{head:i}=e.selection.main;n.between(i,i,(s,r)=>{si&&(t=!0)}),t&&(n=n.update({filterFrom:i,filterTo:i,filter:(s,r)=>r<=i||s>=i}))}return n},provide:n=>B.decorations.from(n),toJSON(n,e){let t=[];return n.between(0,e.doc.length,(i,s)=>{t.push(i,s)}),t},fromJSON(n){if(!Array.isArray(n)||n.length%2)throw new RangeError("Invalid JSON for fold state");let e=[];for(let t=0;t{(!s||s.from>r)&&(s={from:r,to:o})}),s}function kp(n,e,t){let i=!1;return n.between(e,e,(s,r)=>{s==e&&r==t&&(i=!0)}),i}function Uh(n,e){return n.field(Vt,!1)?e:e.concat(R.appendConfig.of(Yh()))}const vp=n=>{for(let e of Kh(n)){let t=Nn(n.state,e.from,e.to);if(t)return n.dispatch({effects:Uh(n.state,[Xn.of(t),Gh(n,t)])}),!0}return!1},xp=n=>{if(!n.state.field(Vt,!1))return!1;let e=[];for(let t of Kh(n)){let i=Vn(n.state,t.from,t.to);i&&e.push(ji.of(i),Gh(n,i,!1))}return e.length&&n.dispatch({effects:e}),e.length>0};function Gh(n,e,t=!0){let i=n.state.doc.lineAt(e.from).number,s=n.state.doc.lineAt(e.to).number;return B.announce.of(`${n.state.phrase(t?"Folded lines":"Unfolded lines")} ${i} ${n.state.phrase("to")} ${s}.`)}const Sp=n=>{let{state:e}=n,t=[];for(let i=0;i{let e=n.state.field(Vt,!1);if(!e||!e.size)return!1;let t=[];return e.between(0,n.state.doc.length,(i,s)=>{t.push(ji.of({from:i,to:s}))}),n.dispatch({effects:t}),!0},Ap=[{key:"Ctrl-Shift-[",mac:"Cmd-Alt-[",run:vp},{key:"Ctrl-Shift-]",mac:"Cmd-Alt-]",run:xp},{key:"Ctrl-Alt-[",run:Sp},{key:"Ctrl-Alt-]",run:Cp}],Mp={placeholderDOM:null,placeholderText:"…"},Jh=O.define({combine(n){return Wt(n,Mp)}});function Yh(n){let e=[Vt,Op];return n&&e.push(Jh.of(n)),e}const wl=E.replace({widget:new class extends tt{toDOM(n){let{state:e}=n,t=e.facet(Jh),i=r=>{let o=n.lineBlockAt(n.posAtDOM(r.target)),l=Vn(n.state,o.from,o.to);l&&n.dispatch({effects:ji.of(l)}),r.preventDefault()};if(t.placeholderDOM)return t.placeholderDOM(n,i);let s=document.createElement("span");return s.textContent=t.placeholderText,s.setAttribute("aria-label",e.phrase("folded code")),s.title=e.phrase("unfold"),s.className="cm-foldPlaceholder",s.onclick=i,s}}}),Dp={openText:"⌄",closedText:"›",markerDOM:null,domEventHandlers:{},foldingChanged:()=>!1};class vs extends yt{constructor(e,t){super(),this.config=e,this.open=t}eq(e){return this.config==e.config&&this.open==e.open}toDOM(e){if(this.config.markerDOM)return this.config.markerDOM(this.open);let t=document.createElement("span");return 
t.textContent=this.open?this.config.openText:this.config.closedText,t.title=e.state.phrase(this.open?"Fold line":"Unfold line"),t}}function Tp(n={}){let e=Object.assign(Object.assign({},Dp),n),t=new vs(e,!0),i=new vs(e,!1),s=ye.fromClass(class{constructor(o){this.from=o.viewport.from,this.markers=this.buildMarkers(o)}update(o){(o.docChanged||o.viewportChanged||o.startState.facet(wt)!=o.state.facet(wt)||o.startState.field(Vt,!1)!=o.state.field(Vt,!1)||pe(o.startState)!=pe(o.state)||e.foldingChanged(o))&&(this.markers=this.buildMarkers(o.view))}buildMarkers(o){let l=new It;for(let a of o.viewportLineBlocks){let h=Vn(o.state,a.from,a.to)?i:Nn(o.state,a.from,a.to)?t:null;h&&l.add(a.from,a.from,h)}return l.finish()}}),{domEventHandlers:r}=e;return[s,Od({class:"cm-foldGutter",markers(o){var l;return((l=o.plugin(s))===null||l===void 0?void 0:l.markers)||H.empty},initialSpacer(){return new vs(e,!1)},domEventHandlers:Object.assign(Object.assign({},r),{click:(o,l,a)=>{if(r.click&&r.click(o,l,a))return!0;let h=Vn(o.state,l.from,l.to);if(h)return o.dispatch({effects:ji.of(h)}),!0;let c=Nn(o.state,l.from,l.to);return c?(o.dispatch({effects:Xn.of(c)}),!0):!1}})}),Yh()]}const Op=B.baseTheme({".cm-foldPlaceholder":{backgroundColor:"#eee",border:"1px solid #ddd",color:"#888",borderRadius:".2em",margin:"0 1px",padding:"0 1px",cursor:"pointer"},".cm-foldGutter span":{padding:"0 1px",cursor:"pointer"}});class ci{constructor(e,t){this.specs=e;let i;function s(l){let a=mt.newName();return(i||(i=Object.create(null)))["."+a]=l,a}const r=typeof t.all=="string"?t.all:t.all?s(t.all):void 0,o=t.scope;this.scope=o instanceof Ie?l=>l.prop(Bt)==o.data:o?l=>l==o:void 0,this.style=Nh(e.map(l=>({tag:l.tag,class:l.class||s(Object.assign({},l,{tag:null}))})),{all:r}).style,this.module=i?new mt(i):null,this.themeType=t.themeType}static define(e,t){return new ci(e,t||{})}}const br=O.define(),Xh=O.define({combine(n){return n.length?[n[0]]:null}});function xs(n){let e=n.facet(br);return e.length?e:n.facet(Xh)}function jr(n,e){let t=[Pp],i;return n instanceof ci&&(n.module&&t.push(B.styleModule.of(n.module)),i=n.themeType),e?.fallback?t.push(Xh.of(n)):i?t.push(br.computeN([B.darkTheme],s=>s.facet(B.darkTheme)==(i=="dark")?[n]:[])):t.push(br.of(n)),t}class Bp{constructor(e){this.markCache=Object.create(null),this.tree=pe(e.state),this.decorations=this.buildDeco(e,xs(e.state))}update(e){let t=pe(e.state),i=xs(e.state),s=i!=xs(e.startState);t.length{i.add(o,l,this.markCache[a]||(this.markCache[a]=E.mark({class:a})))},s,r);return i.finish()}}const Pp=Wi.high(ye.fromClass(Bp,{decorations:n=>n.decorations})),Ep=ci.define([{tag:m.meta,color:"#404740"},{tag:m.link,textDecoration:"underline"},{tag:m.heading,textDecoration:"underline",fontWeight:"bold"},{tag:m.emphasis,fontStyle:"italic"},{tag:m.strong,fontWeight:"bold"},{tag:m.strikethrough,textDecoration:"line-through"},{tag:m.keyword,color:"#708"},{tag:[m.atom,m.bool,m.url,m.contentSeparator,m.labelName],color:"#219"},{tag:[m.literal,m.inserted],color:"#164"},{tag:[m.string,m.deleted],color:"#a11"},{tag:[m.regexp,m.escape,m.special(m.string)],color:"#e40"},{tag:m.definition(m.variableName),color:"#00f"},{tag:m.local(m.variableName),color:"#30a"},{tag:[m.typeName,m.namespace],color:"#085"},{tag:m.className,color:"#167"},{tag:[m.special(m.variableName),m.macroName],color:"#256"},{tag:m.definition(m.propertyName),color:"#00c"},{tag:m.comment,color:"#940"},{tag:m.invalid,color:"#f00"}]),Rp=1e4,Lp="()[]{}",Ip=new L;function yr(n,e,t){let i=n.prop(e<0?L.openedBy:L.closedBy);if(i)return 
i;if(n.name.length==1){let s=t.indexOf(n.name);if(s>-1&&s%2==(e<0?1:0))return[t[s+e]]}return null}function wr(n){let e=n.type.prop(Ip);return e?e(n.node):n}function Gt(n,e,t,i={}){let s=i.maxScanDistance||Rp,r=i.brackets||Lp,o=pe(n),l=o.resolveInner(e,t);for(let a=l;a;a=a.parent){let h=yr(a.type,t,r);if(h&&a.from0?e>=c.from&&ec.from&&e<=c.to))return _p(n,e,t,a,c,h,r)}}return Np(n,e,t,o,l.type,s,r)}function _p(n,e,t,i,s,r,o){let l=i.parent,a={from:s.from,to:s.to},h=0,c=l?.cursor();if(c&&(t<0?c.childBefore(i.from):c.childAfter(i.to)))do if(t<0?c.to<=i.from:c.from>=i.to){if(h==0&&r.indexOf(c.type.name)>-1&&c.from0)return null;let h={from:t<0?e-1:e,to:t>0?e+1:e},c=n.doc.iterRange(e,t>0?n.doc.length:0),f=0;for(let u=0;!c.next().done&&u<=r;){let d=c.value;t<0&&(u+=d.length);let p=e+u*t;for(let g=t>0?0:d.length-1,b=t>0?d.length:-1;g!=b;g+=t){let w=o.indexOf(d[g]);if(!(w<0||i.resolveInner(p+g,1).type!=s))if(w%2==0==t>0)f++;else{if(f==1)return{start:h,end:{from:p+g,to:p+g+1},matched:w>>1==a>>1};f--}}t>0&&(u+=d.length)}return c.done?{start:h,matched:!1}:null}function kl(n,e,t,i=0,s=0){e==null&&(e=n.search(/[^\s\u00a0]/),e==-1&&(e=n.length));let r=s;for(let o=i;o=this.string.length}sol(){return this.pos==0}peek(){return this.string.charAt(this.pos)||void 0}next(){if(this.post}eatSpace(){let e=this.pos;for(;/[\s\u00a0]/.test(this.string.charAt(this.pos));)++this.pos;return this.pos>e}skipToEnd(){this.pos=this.string.length}skipTo(e){let t=this.string.indexOf(e,this.pos);if(t>-1)return this.pos=t,!0}backUp(e){this.pos-=e}column(){return this.lastColumnPosi?o.toLowerCase():o,r=this.string.substr(this.pos,e.length);return s(r)==s(e)?(t!==!1&&(this.pos+=e.length),!0):null}else{let s=this.string.slice(this.pos).match(e);return s&&s.index>0?null:(s&&t!==!1&&(this.pos+=s[0].length),s)}}current(){return this.string.slice(this.start,this.pos)}}function Vp(n){return{name:n.name||"",token:n.token,blankLine:n.blankLine||(()=>{}),startState:n.startState||(()=>!0),copyState:n.copyState||Fp,indent:n.indent||(()=>null),languageData:n.languageData||{},tokenTable:n.tokenTable||Ur}}function Fp(n){if(typeof n!="object")return n;let e={};for(let t in n){let i=n[t];e[t]=i instanceof Array?i.slice():i}return e}const vl=new WeakMap;class Jt extends Ie{constructor(e){let t=Vh(e.languageData),i=Vp(e),s,r=new class extends Ih{createParse(o,l,a){return new Wp(s,o,l,a)}};super(t,r,[Wh.of((o,l)=>this.getIndent(o,l))],e.name),this.topNode=jp(t),s=this,this.streamParser=i,this.stateAfter=new L({perNode:!0}),this.tokenTable=e.tokenTable?new tc(i.tokenTable):qp}static define(e){return new Jt(e)}getIndent(e,t){let i=pe(e.state),s=i.resolve(t);for(;s&&s.type!=this.topNode;)s=s.parent;if(!s)return null;let r,{overrideIndentation:o}=e.options;o&&(r=vl.get(e.state),r!=null&&r1e4)return null;for(;a=i&&t+e.length<=s&&e.prop(n.stateAfter);if(r)return{state:n.streamParser.copyState(r),pos:t+e.length};for(let o=e.children.length-1;o>=0;o--){let l=e.children[o],a=t+e.positions[o],h=l instanceof q&&a=e.length)return e;!s&&e.type==n.topNode&&(s=!0);for(let r=e.children.length-1;r>=0;r--){let o=e.positions[r],l=e.children[r],a;if(ot&&Kr(n,s.tree,0-s.offset,t,o),a;if(l&&(a=Qh(n,s.tree,t+s.offset,l.pos+s.offset,!1)))return{state:l.state,tree:a}}return{state:n.streamParser.startState(i?Nt(i):4),tree:q.empty}}class Wp{constructor(e,t,i,s){this.lang=e,this.input=t,this.fragments=i,this.ranges=s,this.stoppedAt=null,this.chunks=[],this.chunkPos=[],this.chunk=[],this.chunkReused=void 0,this.rangeIndex=0,this.to=s[s.length-1].to;let 
r=li.get(),o=s[0].from,{state:l,tree:a}=Hp(e,i,o,r?.state);this.state=l,this.parsedPos=this.chunkStart=o+a.length;for(let h=0;h=t?this.finish():e&&this.parsedPos>=e.viewport.to?(e.skipUntilInView(this.parsedPos,t),this.finish()):null}stopAt(e){this.stoppedAt=e}lineAfter(e){let t=this.input.chunk(e);if(this.input.lineChunks)t==` -`&&(t="");else{let i=t.indexOf(` -`);i>-1&&(t=t.slice(0,i))}return e+t.length<=this.to?t:t.slice(0,this.to-e)}nextLine(){let e=this.parsedPos,t=this.lineAfter(e),i=e+t.length;for(let s=this.rangeIndex;;){let r=this.ranges[s].to;if(r>=i||(t=t.slice(0,r-(i-t.length)),s++,s==this.ranges.length))break;let o=this.ranges[s].from,l=this.lineAfter(o);t+=l,i=o+l.length}return{line:t,end:i}}skipGapsTo(e,t,i){for(;;){let s=this.ranges[this.rangeIndex].to,r=e+t;if(i>0?s>r:s>=r)break;let o=this.ranges[++this.rangeIndex].from;t+=o-s}return t}moveRangeIndex(){for(;this.ranges[this.rangeIndex].to1){r=this.skipGapsTo(t,r,1),t+=r;let o=this.chunk.length;r=this.skipGapsTo(i,r,-1),i+=r,s+=this.chunk.length-o}return this.chunk.push(e,t,i,s),r}parseLine(e){let{line:t,end:i}=this.nextLine(),s=0,{streamParser:r}=this.lang,o=new Zh(t,e?e.state.tabSize:4,e?Nt(e.state):2);if(o.eol())r.blankLine(this.state,o.indentUnit);else for(;!o.eol();){let l=$h(r.token,o,this.state);if(l&&(s=this.emitToken(this.lang.tokenTable.resolve(l),this.parsedPos+o.start,this.parsedPos+o.pos,4,s)),o.start>1e4)break}this.parsedPos=i,this.moveRangeIndex(),this.parsedPose.start)return s}throw new Error("Stream parser failed to advance stream.")}const Ur=Object.create(null),Vi=[xe.none],zp=new Vr(Vi),xl=[],ec=Object.create(null);for(let[n,e]of[["variable","variableName"],["variable-2","variableName.special"],["string-2","string.special"],["def","variableName.definition"],["tag","tagName"],["attribute","attributeName"],["type","typeName"],["builtin","variableName.standard"],["qualifier","modifier"],["error","invalid"],["header","heading"],["property","propertyName"]])ec[n]=ic(Ur,e);class tc{constructor(e){this.extra=e,this.table=Object.assign(Object.create(null),ec)}resolve(e){return e?this.table[e]||(this.table[e]=ic(this.extra,e)):0}}const qp=new tc(Ur);function Ss(n,e){xl.indexOf(n)>-1||(xl.push(n),console.warn(e))}function ic(n,e){let t=null;for(let r of e.split(".")){let o=n[r]||m[r];o?typeof o=="function"?t?t=o(t):Ss(r,`Modifier ${r} used at start of tag`):t?Ss(r,`Tag ${r} used as modifier`):t=o:Ss(r,`Unknown highlighting tag ${r}`)}if(!t)return 0;let i=e.replace(/ /g,"_"),s=xe.define({id:Vi.length,name:i,props:[$d({[i]:t})]});return Vi.push(s),s.id}function jp(n){let e=xe.define({id:Vi.length,name:"Document",props:[Bt.add(()=>n)]});return Vi.push(e),e}const Kp=n=>{let e=Jr(n.state);return e.line?Up(n):e.block?Jp(n):!1};function Gr(n,e){return({state:t,dispatch:i})=>{if(t.readOnly)return!1;let s=n(e,t);return s?(i(t.update(s)),!0):!1}}const Up=Gr(Zp,0),Gp=Gr(nc,0),Jp=Gr((n,e)=>nc(n,e,Xp(e)),0);function Jr(n,e=n.selection.main.head){let t=n.languageDataAt("commentTokens",e);return t.length?t[0]:{}}const bi=50;function Yp(n,{open:e,close:t},i,s){let r=n.sliceDoc(i-bi,i),o=n.sliceDoc(s,s+bi),l=/\s*$/.exec(r)[0].length,a=/^\s*/.exec(o)[0].length,h=r.length-l;if(r.slice(h-e.length,h)==e&&o.slice(a,a+t.length)==t)return{open:{pos:i-l,margin:l&&1},close:{pos:s+a,margin:a&&1}};let c,f;s-i<=2*bi?c=f=n.sliceDoc(i,s):(c=n.sliceDoc(i,i+bi),f=n.sliceDoc(s-bi,s));let u=/^\s*/.exec(c)[0].length,d=/\s*$/.exec(f)[0].length,p=f.length-d-t.length;return 
c.slice(u,u+e.length)==e&&f.slice(p,p+t.length)==t?{open:{pos:i+u+e.length,margin:/\s/.test(c.charAt(u+e.length))?1:0},close:{pos:s-d-t.length,margin:/\s/.test(f.charAt(p-1))?1:0}}:null}function Xp(n){let e=[];for(let t of n.selection.ranges){let i=n.doc.lineAt(t.from),s=t.to<=i.to?i:n.doc.lineAt(t.to),r=e.length-1;r>=0&&e[r].to>i.from?e[r].to=s.to:e.push({from:i.from,to:s.to})}return e}function nc(n,e,t=e.selection.ranges){let i=t.map(r=>Jr(e,r.from).block);if(!i.every(r=>r))return null;let s=t.map((r,o)=>Yp(e,i[o],r.from,r.to));if(n!=2&&!s.every(r=>r))return{changes:e.changes(t.map((r,o)=>s[o]?[]:[{from:r.from,insert:i[o].open+" "},{from:r.to,insert:" "+i[o].close}]))};if(n!=1&&s.some(r=>r)){let r=[];for(let o=0,l;os&&(r==o||o>c.from)){s=c.from;let f=Jr(e,h).line;if(!f)continue;let u=/^\s*/.exec(c.text)[0].length,d=u==c.length,p=c.text.slice(u,u+f.length)==f?u:-1;ur.comment<0&&(!r.empty||r.single))){let r=[];for(let{line:l,token:a,indent:h,empty:c,single:f}of i)(f||!c)&&r.push({from:l.from+h,insert:a+" "});let o=e.changes(r);return{changes:o,selection:e.selection.map(o,1)}}else if(n!=1&&i.some(r=>r.comment>=0)){let r=[];for(let{line:o,comment:l,token:a}of i)if(l>=0){let h=o.from+l,c=h+a.length;o.text[c-o.from]==" "&&c++,r.push({from:h,to:c})}return{changes:r}}return null}const kr=Ht.define(),Qp=Ht.define(),$p=O.define(),sc=O.define({combine(n){return Wt(n,{minDepth:100,newGroupDelay:500},{minDepth:Math.max,newGroupDelay:Math.min})}});function em(n){let e=0;return n.iterChangedRanges((t,i)=>e=i),e}const rc=Me.define({create(){return Ze.empty},update(n,e){let t=e.state.facet(sc),i=e.annotation(kr);if(i){let a=e.docChanged?k.single(em(e.changes)):void 0,h=Se.fromTransaction(e,a),c=i.side,f=c==0?n.undone:n.done;return h?f=Fn(f,f.length,t.minDepth,h):f=ac(f,e.startState.selection),new Ze(c==0?i.rest:f,c==0?f:i.rest)}let s=e.annotation(Qp);if((s=="full"||s=="before")&&(n=n.isolate()),e.annotation(re.addToHistory)===!1)return e.changes.empty?n:n.addMapping(e.changes.desc);let r=Se.fromTransaction(e),o=e.annotation(re.time),l=e.annotation(re.userEvent);return r?n=n.addChanges(r,o,l,t.newGroupDelay,t.minDepth):e.selection&&(n=n.addSelection(e.startState.selection,o,l,t.newGroupDelay)),(s=="full"||s=="after")&&(n=n.isolate()),n},toJSON(n){return{done:n.done.map(e=>e.toJSON()),undone:n.undone.map(e=>e.toJSON())}},fromJSON(n){return new Ze(n.done.map(Se.fromJSON),n.undone.map(Se.fromJSON))}});function tm(n={}){return[rc,sc.of(n),B.domEventHandlers({beforeinput(e,t){let i=e.inputType=="historyUndo"?oc:e.inputType=="historyRedo"?vr:null;return i?(e.preventDefault(),i(t)):!1}})]}function Zn(n,e){return function({state:t,dispatch:i}){if(!e&&t.readOnly)return!1;let s=t.field(rc,!1);if(!s)return!1;let r=s.pop(n,t,e);return r?(i(r),!0):!1}}const oc=Zn(0,!1),vr=Zn(1,!1),im=Zn(0,!0),nm=Zn(1,!0);class Se{constructor(e,t,i,s,r){this.changes=e,this.effects=t,this.mapped=i,this.startSelection=s,this.selectionsAfter=r}setSelAfter(e){return new Se(this.changes,this.effects,this.mapped,this.startSelection,e)}toJSON(){var e,t,i;return{changes:(e=this.changes)===null||e===void 0?void 0:e.toJSON(),mapped:(t=this.mapped)===null||t===void 0?void 0:t.toJSON(),startSelection:(i=this.startSelection)===null||i===void 0?void 0:i.toJSON(),selectionsAfter:this.selectionsAfter.map(s=>s.toJSON())}}static fromJSON(e){return new Se(e.changes&&ne.fromJSON(e.changes),[],e.mapped&&Qe.fromJSON(e.mapped),e.startSelection&&k.fromJSON(e.startSelection),e.selectionsAfter.map(k.fromJSON))}static fromTransaction(e,t){let i=_e;for(let s of 
e.startState.facet($p)){let r=s(e);r.length&&(i=i.concat(r))}return!i.length&&e.changes.empty?null:new Se(e.changes.invert(e.startState.doc),i,void 0,t||e.startState.selection,_e)}static selection(e){return new Se(void 0,_e,void 0,void 0,e)}}function Fn(n,e,t,i){let s=e+1>t+20?e-t-1:0,r=n.slice(s,e);return r.push(i),r}function sm(n,e){let t=[],i=!1;return n.iterChangedRanges((s,r)=>t.push(s,r)),e.iterChangedRanges((s,r,o,l)=>{for(let a=0;a=h&&o<=c&&(i=!0)}}),i}function rm(n,e){return n.ranges.length==e.ranges.length&&n.ranges.filter((t,i)=>t.empty!=e.ranges[i].empty).length===0}function lc(n,e){return n.length?e.length?n.concat(e):n:e}const _e=[],om=200;function ac(n,e){if(n.length){let t=n[n.length-1],i=t.selectionsAfter.slice(Math.max(0,t.selectionsAfter.length-om));return i.length&&i[i.length-1].eq(e)?n:(i.push(e),Fn(n,n.length-1,1e9,t.setSelAfter(i)))}else return[Se.selection([e])]}function lm(n){let e=n[n.length-1],t=n.slice();return t[n.length-1]=e.setSelAfter(e.selectionsAfter.slice(0,e.selectionsAfter.length-1)),t}function Cs(n,e){if(!n.length)return n;let t=n.length,i=_e;for(;t;){let s=am(n[t-1],e,i);if(s.changes&&!s.changes.empty||s.effects.length){let r=n.slice(0,t);return r[t-1]=s,r}else e=s.mapped,t--,i=s.selectionsAfter}return i.length?[Se.selection(i)]:_e}function am(n,e,t){let i=lc(n.selectionsAfter.length?n.selectionsAfter.map(l=>l.map(e)):_e,t);if(!n.changes)return Se.selection(i);let s=n.changes.map(e),r=e.mapDesc(n.changes,!0),o=n.mapped?n.mapped.composeDesc(r):r;return new Se(s,R.mapEffects(n.effects,e),o,n.startSelection.map(r),i)}const hm=/^(input\.type|delete)($|\.)/;class Ze{constructor(e,t,i=0,s=void 0){this.done=e,this.undone=t,this.prevTime=i,this.prevUserEvent=s}isolate(){return this.prevTime?new Ze(this.done,this.undone):this}addChanges(e,t,i,s,r){let o=this.done,l=o[o.length-1];return l&&l.changes&&!l.changes.empty&&e.changes&&(!i||hm.test(i))&&(!l.selectionsAfter.length&&t-this.prevTime0&&t-this.prevTimet.empty?n.moveByChar(t,e):Qn(t,e))}function we(n){return n.textDirectionAt(n.state.selection.main.head)==Q.LTR}const cc=n=>hc(n,!we(n)),fc=n=>hc(n,we(n));function uc(n,e){return We(n,t=>t.empty?n.moveByGroup(t,e):Qn(t,e))}const fm=n=>uc(n,!we(n)),um=n=>uc(n,we(n));function dm(n,e,t){if(e.type.prop(t))return!0;let i=e.to-e.from;return i&&(i>2||/[^\s,.;:]/.test(n.sliceDoc(e.from,e.to)))||e.firstChild}function $n(n,e,t){let i=pe(n).resolveInner(e.head),s=t?L.closedBy:L.openedBy;for(let a=e.head;;){let h=t?i.childAfter(a):i.childBefore(a);if(!h)break;dm(n,h,s)?i=h:a=t?h.to:h.from}let r=i.type.prop(s),o,l;return r&&(o=t?Gt(n,i.from,1):Gt(n,i.to,-1))&&o.matched?l=t?o.end.to:o.end.from:l=t?i.to:i.from,k.cursor(l,t?-1:1)}const pm=n=>We(n,e=>$n(n.state,e,!we(n))),mm=n=>We(n,e=>$n(n.state,e,we(n)));function dc(n,e){return We(n,t=>{if(!t.empty)return Qn(t,e);let i=n.moveVertically(t,e);return i.head!=t.head?i:n.moveToLineBoundary(t,e)})}const pc=n=>dc(n,!1),mc=n=>dc(n,!0);function gc(n){return Math.max(n.defaultLineHeight,Math.min(n.dom.clientHeight,innerHeight)-5)}function bc(n,e){let{state:t}=n,i=fi(t.selection,l=>l.empty?n.moveVertically(l,e,gc(n)):Qn(l,e));if(i.eq(t.selection))return!1;let s=n.coordsAtPos(t.selection.main.head),r=n.scrollDOM.getBoundingClientRect(),o;return s&&s.top>r.top&&s.bottombc(n,!1),xr=n=>bc(n,!0);function St(n,e,t){let i=n.lineBlockAt(e.head),s=n.moveToLineBoundary(e,t);if(s.head==e.head&&s.head!=(t?i.to:i.from)&&(s=n.moveToLineBoundary(e,t,!1)),!t&&s.head==i.from&&i.length){let 
r=/^\s*/.exec(n.state.sliceDoc(i.from,Math.min(i.from+100,i.to)))[0].length;r&&e.head!=i.from+r&&(s=k.cursor(i.from+r))}return s}const gm=n=>We(n,e=>St(n,e,!0)),bm=n=>We(n,e=>St(n,e,!1)),ym=n=>We(n,e=>St(n,e,!we(n))),wm=n=>We(n,e=>St(n,e,we(n))),km=n=>We(n,e=>k.cursor(n.lineBlockAt(e.head).from,1)),vm=n=>We(n,e=>k.cursor(n.lineBlockAt(e.head).to,-1));function xm(n,e,t){let i=!1,s=fi(n.selection,r=>{let o=Gt(n,r.head,-1)||Gt(n,r.head,1)||r.head>0&&Gt(n,r.head-1,1)||r.headxm(n,e,!1);function Ve(n,e){let t=fi(n.state.selection,i=>{let s=e(i);return k.range(i.anchor,s.head,s.goalColumn)});return t.eq(n.state.selection)?!1:(n.dispatch(it(n.state,t)),!0)}function yc(n,e){return Ve(n,t=>n.moveByChar(t,e))}const wc=n=>yc(n,!we(n)),kc=n=>yc(n,we(n));function vc(n,e){return Ve(n,t=>n.moveByGroup(t,e))}const Cm=n=>vc(n,!we(n)),Am=n=>vc(n,we(n)),Mm=n=>Ve(n,e=>$n(n.state,e,!we(n))),Dm=n=>Ve(n,e=>$n(n.state,e,we(n)));function xc(n,e){return Ve(n,t=>n.moveVertically(t,e))}const Sc=n=>xc(n,!1),Cc=n=>xc(n,!0);function Ac(n,e){return Ve(n,t=>n.moveVertically(t,e,gc(n)))}const Cl=n=>Ac(n,!1),Al=n=>Ac(n,!0),Tm=n=>Ve(n,e=>St(n,e,!0)),Om=n=>Ve(n,e=>St(n,e,!1)),Bm=n=>Ve(n,e=>St(n,e,!we(n))),Pm=n=>Ve(n,e=>St(n,e,we(n))),Em=n=>Ve(n,e=>k.cursor(n.lineBlockAt(e.head).from)),Rm=n=>Ve(n,e=>k.cursor(n.lineBlockAt(e.head).to)),Ml=({state:n,dispatch:e})=>(e(it(n,{anchor:0})),!0),Dl=({state:n,dispatch:e})=>(e(it(n,{anchor:n.doc.length})),!0),Tl=({state:n,dispatch:e})=>(e(it(n,{anchor:n.selection.main.anchor,head:0})),!0),Ol=({state:n,dispatch:e})=>(e(it(n,{anchor:n.selection.main.anchor,head:n.doc.length})),!0),Lm=({state:n,dispatch:e})=>(e(n.update({selection:{anchor:0,head:n.doc.length},userEvent:"select"})),!0),Im=({state:n,dispatch:e})=>{let t=ts(n).map(({from:i,to:s})=>k.range(i,Math.min(s+1,n.doc.length)));return e(n.update({selection:k.create(t),userEvent:"select"})),!0},_m=({state:n,dispatch:e})=>{let t=fi(n.selection,i=>{var s;let r=pe(n).resolveInner(i.head,1);for(;!(r.from=i.to||r.to>i.to&&r.from<=i.from||!(!((s=r.parent)===null||s===void 0)&&s.parent));)r=r.parent;return k.range(r.to,r.from)});return e(it(n,t)),!0},Nm=({state:n,dispatch:e})=>{let t=n.selection,i=null;return t.ranges.length>1?i=k.create([t.main]):t.main.empty||(i=k.create([k.cursor(t.main.head)])),i?(e(it(n,i)),!0):!1};function es(n,e){if(n.state.readOnly)return!1;let t="delete.selection",{state:i}=n,s=i.changeByRange(r=>{let{from:o,to:l}=r;if(o==l){let a=e(o);ao&&(t="delete.forward",a=cn(n,a,!0)),o=Math.min(o,a),l=Math.max(l,a)}else o=cn(n,o,!1),l=cn(n,l,!0);return o==l?{range:r}:{changes:{from:o,to:l},range:k.cursor(o)}});return s.changes.empty?!1:(n.dispatch(i.update(s,{scrollIntoView:!0,userEvent:t,effects:t=="delete.selection"?B.announce.of(i.phrase("Selection deleted")):void 0})),!0)}function cn(n,e,t){if(n instanceof B)for(let i of n.state.facet(B.atomicRanges).map(s=>s(n)))i.between(e,e,(s,r)=>{se&&(e=t?r:s)});return e}const Mc=(n,e)=>es(n,t=>{let{state:i}=n,s=i.doc.lineAt(t),r,o;if(!e&&t>s.from&&tMc(n,!1),Dc=n=>Mc(n,!0),Tc=(n,e)=>es(n,t=>{let i=t,{state:s}=n,r=s.doc.lineAt(i),o=s.charCategorizer(i);for(let l=null;;){if(i==(e?r.to:r.from)){i==t&&r.number!=(e?s.doc.lines:1)&&(i+=e?1:-1);break}let a=Oe(r.text,i-r.from,e)+r.from,h=r.text.slice(Math.min(i,a)-r.from,Math.max(i,a)-r.from),c=o(h);if(l!=null&&c!=l)break;(h!=" "||i!=t)&&(l=c),i=a}return i}),Oc=n=>Tc(n,!1),Vm=n=>Tc(n,!0),Bc=n=>es(n,e=>{let t=n.lineBlockAt(e).to;return ees(n,e=>{let t=n.lineBlockAt(e).from;return 
e>t?t:Math.max(0,e-1)}),Hm=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let t=n.changeByRange(i=>({changes:{from:i.from,to:i.to,insert:N.of(["",""])},range:k.cursor(i.from)}));return e(n.update(t,{scrollIntoView:!0,userEvent:"input"})),!0},Wm=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let t=n.changeByRange(i=>{if(!i.empty||i.from==0||i.from==n.doc.length)return{range:i};let s=i.from,r=n.doc.lineAt(s),o=s==r.from?s-1:Oe(r.text,s-r.from,!1)+r.from,l=s==r.to?s+1:Oe(r.text,s-r.from,!0)+r.from;return{changes:{from:o,to:l,insert:n.doc.slice(s,l).append(n.doc.slice(o,s))},range:k.cursor(l)}});return t.changes.empty?!1:(e(n.update(t,{scrollIntoView:!0,userEvent:"move.character"})),!0)};function ts(n){let e=[],t=-1;for(let i of n.selection.ranges){let s=n.doc.lineAt(i.from),r=n.doc.lineAt(i.to);if(!i.empty&&i.to==r.from&&(r=n.doc.lineAt(i.to-1)),t>=s.number){let o=e[e.length-1];o.to=r.to,o.ranges.push(i)}else e.push({from:s.from,to:r.to,ranges:[i]});t=r.number+1}return e}function Pc(n,e,t){if(n.readOnly)return!1;let i=[],s=[];for(let r of ts(n)){if(t?r.to==n.doc.length:r.from==0)continue;let o=n.doc.lineAt(t?r.to+1:r.from-1),l=o.length+1;if(t){i.push({from:r.to,to:o.to},{from:r.from,insert:o.text+n.lineBreak});for(let a of r.ranges)s.push(k.range(Math.min(n.doc.length,a.anchor+l),Math.min(n.doc.length,a.head+l)))}else{i.push({from:o.from,to:r.from},{from:r.to,insert:n.lineBreak+o.text});for(let a of r.ranges)s.push(k.range(a.anchor-l,a.head-l))}}return i.length?(e(n.update({changes:i,scrollIntoView:!0,selection:k.create(s,n.selection.mainIndex),userEvent:"move.line"})),!0):!1}const zm=({state:n,dispatch:e})=>Pc(n,e,!1),qm=({state:n,dispatch:e})=>Pc(n,e,!0);function Ec(n,e,t){if(n.readOnly)return!1;let i=[];for(let s of ts(n))t?i.push({from:s.from,insert:n.doc.slice(s.from,s.to)+n.lineBreak}):i.push({from:s.to,insert:n.lineBreak+n.doc.slice(s.from,s.to)});return e(n.update({changes:i,scrollIntoView:!0,userEvent:"input.copyline"})),!0}const jm=({state:n,dispatch:e})=>Ec(n,e,!1),Km=({state:n,dispatch:e})=>Ec(n,e,!0),Um=n=>{if(n.state.readOnly)return!1;let{state:e}=n,t=e.changes(ts(e).map(({from:s,to:r})=>(s>0?s--:rn.moveVertically(s,!0)).map(t);return n.dispatch({changes:t,selection:i,scrollIntoView:!0,userEvent:"delete.line"}),!0};function Gm(n,e){if(/\(\)|\[\]|\{\}/.test(n.sliceDoc(e-1,e+1)))return{from:e,to:e};let t=pe(n).resolveInner(e),i=t.childBefore(e),s=t.childAfter(e),r;return i&&s&&i.to<=e&&s.from>=e&&(r=i.type.prop(L.closedBy))&&r.indexOf(s.name)>-1&&n.doc.lineAt(i.to).from==n.doc.lineAt(s.from).from?{from:i.to,to:s.from}:null}const Jm=Rc(!1),Ym=Rc(!0);function Rc(n){return({state:e,dispatch:t})=>{if(e.readOnly)return!1;let i=e.changeByRange(s=>{let{from:r,to:o}=s,l=e.doc.lineAt(r),a=!n&&r==o&&Gm(e,r);n&&(r=o=(o<=l.to?l:e.doc.lineAt(o)).to);let h=new Yn(e,{simulateBreak:r,simulateDoubleBreak:!!a}),c=zr(h,r);for(c==null&&(c=/^\s*/.exec(e.doc.lineAt(r).text)[0].length);ol.from&&r{let s=[];for(let o=i.from;o<=i.to;){let l=n.doc.lineAt(o);l.number>t&&(i.empty||i.to>l.from)&&(e(l,s,i),t=l.number),o=l.to+1}let r=n.changes(s);return{changes:s,range:k.range(r.mapPos(i.anchor,1),r.mapPos(i.head,1))}})}const Xm=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let t=Object.create(null),i=new Yn(n,{overrideIndentation:r=>{let o=t[r];return o??-1}}),s=Yr(n,(r,o,l)=>{let a=zr(i,r.from);if(a==null)return;/\S/.test(r.text)||(a=0);let 
h=/^\s*/.exec(r.text)[0],c=Ni(n,a);(h!=c||l.fromn.readOnly?!1:(e(n.update(Yr(n,(t,i)=>{i.push({from:t.from,insert:n.facet(Jn)})}),{userEvent:"input.indent"})),!0),Ic=({state:n,dispatch:e})=>n.readOnly?!1:(e(n.update(Yr(n,(t,i)=>{let s=/^\s*/.exec(t.text)[0];if(!s)return;let r=zi(s,n.tabSize),o=0,l=Ni(n,Math.max(0,r-Nt(n)));for(;o({mac:n.key,run:n.run,shift:n.shift}))),$m=[{key:"Alt-ArrowLeft",mac:"Ctrl-ArrowLeft",run:pm,shift:Mm},{key:"Alt-ArrowRight",mac:"Ctrl-ArrowRight",run:mm,shift:Dm},{key:"Alt-ArrowUp",run:zm},{key:"Shift-Alt-ArrowUp",run:jm},{key:"Alt-ArrowDown",run:qm},{key:"Shift-Alt-ArrowDown",run:Km},{key:"Escape",run:Nm},{key:"Mod-Enter",run:Ym},{key:"Alt-l",mac:"Ctrl-l",run:Im},{key:"Mod-i",run:_m,preventDefault:!0},{key:"Mod-[",run:Ic},{key:"Mod-]",run:Lc},{key:"Mod-Alt-\\",run:Xm},{key:"Shift-Mod-k",run:Um},{key:"Shift-Mod-\\",run:Sm},{key:"Mod-/",run:Kp},{key:"Alt-A",run:Gp}].concat(Qm),eg={key:"Tab",run:Lc,shift:Ic},tg="#2E3235",Ue="#DDDDDD",Ti="#B9D2FF",fn="#b0b0b0",ig="#e0e0e0",_c="#808080",As="#000000",ng="#A54543",Nc="#fc6d24",Mt="#fda331",Ms="#8abeb7",Bl="#b5bd68",yi="#6fb3d2",wi="#cc99cc",sg="#6987AF",Pl=Nc,El="#292d30",un=Ti+"30",rg=tg,Ds=Ue,og="#202325",Rl=Ue,lg=B.theme({"&":{color:Ue,backgroundColor:rg},".cm-content":{caretColor:Rl},".cm-cursor, .cm-dropCursor":{borderLeftColor:Rl},"&.cm-focused .cm-selectionBackground, .cm-selectionBackground, .cm-content ::selection":{backgroundColor:og},".cm-panels":{backgroundColor:El,color:fn},".cm-panels.cm-panels-top":{borderBottom:"2px solid black"},".cm-panels.cm-panels-bottom":{borderTop:"2px solid black"},".cm-searchMatch":{backgroundColor:Ti,outline:`1px solid ${fn}`,color:As},".cm-searchMatch.cm-searchMatch-selected":{backgroundColor:ig,color:As},".cm-activeLine":{backgroundColor:un},".cm-selectionMatch":{backgroundColor:un},"&.cm-focused .cm-matchingBracket, &.cm-focused .cm-nonmatchingBracket":{outline:`1px solid ${fn}`},"&.cm-focused .cm-matchingBracket":{backgroundColor:Ti,color:As},".cm-gutters":{borderRight:"1px solid #ffffff10",color:_c,backgroundColor:El},".cm-activeLineGutter":{backgroundColor:un},".cm-foldPlaceholder":{backgroundColor:"transparent",border:"none",color:Ti},".cm-tooltip":{border:"none",backgroundColor:Ds},".cm-tooltip .cm-tooltip-arrow:before":{borderTopColor:"transparent",borderBottomColor:"transparent"},".cm-tooltip .cm-tooltip-arrow:after":{borderTopColor:Ds,borderBottomColor:Ds},".cm-tooltip-autocomplete":{"& > ul > 
li[aria-selected]":{backgroundColor:un,color:fn}}},{dark:!0}),ag=ci.define([{tag:m.keyword,color:Mt},{tag:[m.name,m.deleted,m.character,m.propertyName,m.macroName],color:Bl},{tag:[m.variableName],color:yi},{tag:[m.function(m.variableName)],color:Mt},{tag:[m.labelName],color:Nc},{tag:[m.color,m.constant(m.name),m.standard(m.name)],color:Mt},{tag:[m.definition(m.name),m.separator],color:wi},{tag:[m.brace],color:wi},{tag:[m.annotation],color:Pl},{tag:[m.number,m.changed,m.annotation,m.modifier,m.self,m.namespace],color:Mt},{tag:[m.typeName,m.className],color:yi},{tag:[m.operator,m.operatorKeyword],color:wi},{tag:[m.tagName],color:Mt},{tag:[m.squareBracket],color:wi},{tag:[m.angleBracket],color:wi},{tag:[m.attributeName],color:yi},{tag:[m.regexp],color:Mt},{tag:[m.quote],color:Ue},{tag:[m.string],color:Bl},{tag:m.link,color:sg,textDecoration:"underline",textUnderlinePosition:"under"},{tag:[m.url,m.escape,m.special(m.string)],color:Ms},{tag:[m.meta],color:ng},{tag:[m.comment],color:_c,fontStyle:"italic"},{tag:m.monospace,color:Ue},{tag:m.strong,fontWeight:"bold",color:Mt},{tag:m.emphasis,fontStyle:"italic",color:yi},{tag:m.strikethrough,textDecoration:"line-through"},{tag:m.heading,fontWeight:"bold",color:Ue},{tag:m.special(m.heading1),fontWeight:"bold",color:Ue},{tag:m.heading1,fontWeight:"bold",color:Ue},{tag:[m.heading2,m.heading3,m.heading4],fontWeight:"bold",color:Ue},{tag:[m.heading5,m.heading6],color:Ue},{tag:[m.atom,m.bool,m.special(m.variableName)],color:Ms},{tag:[m.processingInstruction,m.inserted],color:Ms},{tag:[m.contentSeparator],color:yi},{tag:m.invalid,color:Ti,borderBottom:`1px dotted ${Pl}`}]),hg=[lg,jr(ag)],Ll="#2e3440",Xr="#3b4252",Il="#434c5e",dn="#4c566a",_l="#e5e9f0",Cr="#eceff4",Ts="#8fbcbb",Nl="#88c0d0",cg="#81a1c1",Fe="#5e81ac",fg="#bf616a",Kt="#d08770",Os="#ebcb8b",Vl="#a3be8c",ug="#b48ead",Fl="#d30102",Zr=Cr,Bs=Zr,dg="#ffffff",Ps=Xr,pg=Zr,Hl=Xr,mg=B.theme({"&":{color:Ll,backgroundColor:dg},".cm-content":{caretColor:Hl},".cm-cursor, .cm-dropCursor":{borderLeftColor:Hl},"&.cm-focused .cm-selectionBackground, .cm-selectionBackground, .cm-content ::selection":{backgroundColor:pg},".cm-panels":{backgroundColor:Zr,color:dn},".cm-panels.cm-panels-top":{borderBottom:"2px solid black"},".cm-panels.cm-panels-bottom":{borderTop:"2px solid black"},".cm-searchMatch":{backgroundColor:"#72a1ff59",outline:`1px solid ${dn}`},".cm-searchMatch.cm-searchMatch-selected":{backgroundColor:_l},".cm-activeLine":{backgroundColor:Bs},".cm-selectionMatch":{backgroundColor:_l},"&.cm-focused .cm-matchingBracket, &.cm-focused .cm-nonmatchingBracket":{outline:`1px solid ${dn}`},"&.cm-focused .cm-matchingBracket":{backgroundColor:Cr},".cm-gutters":{backgroundColor:Cr,color:Ll,border:"none"},".cm-activeLineGutter":{backgroundColor:Bs},".cm-foldPlaceholder":{backgroundColor:"transparent",border:"none",color:"#ddd"},".cm-tooltip":{border:"none",backgroundColor:Ps},".cm-tooltip .cm-tooltip-arrow:before":{borderTopColor:"transparent",borderBottomColor:"transparent"},".cm-tooltip .cm-tooltip-arrow:after":{borderTopColor:Ps,borderBottomColor:Ps},".cm-tooltip-autocomplete":{"& > ul > 
li[aria-selected]":{backgroundColor:Bs,color:dn}}},{dark:!1}),gg=ci.define([{tag:m.keyword,color:Fe},{tag:[m.name,m.deleted,m.character,m.propertyName,m.macroName],color:Kt},{tag:[m.variableName],color:Kt},{tag:[m.function(m.variableName)],color:Fe},{tag:[m.labelName],color:cg},{tag:[m.color,m.constant(m.name),m.standard(m.name)],color:Fe},{tag:[m.definition(m.name),m.separator],color:Vl},{tag:[m.brace],color:Ts},{tag:[m.annotation],color:Fl},{tag:[m.number,m.changed,m.annotation,m.modifier,m.self,m.namespace],color:Nl},{tag:[m.typeName,m.className],color:Os},{tag:[m.operator,m.operatorKeyword],color:Vl},{tag:[m.tagName],color:ug},{tag:[m.squareBracket],color:fg},{tag:[m.angleBracket],color:Kt},{tag:[m.attributeName],color:Os},{tag:[m.regexp],color:Fe},{tag:[m.quote],color:Xr},{tag:[m.string],color:Kt},{tag:m.link,color:Ts,textDecoration:"underline",textUnderlinePosition:"under"},{tag:[m.url,m.escape,m.special(m.string)],color:Kt},{tag:[m.meta],color:Nl},{tag:[m.comment],color:Il,fontStyle:"italic"},{tag:m.strong,fontWeight:"bold",color:Fe},{tag:m.emphasis,fontStyle:"italic",color:Fe},{tag:m.strikethrough,textDecoration:"line-through"},{tag:m.heading,fontWeight:"bold",color:Fe},{tag:m.special(m.heading1),fontWeight:"bold",color:Fe},{tag:m.heading1,fontWeight:"bold",color:Fe},{tag:[m.heading2,m.heading3,m.heading4],fontWeight:"bold",color:Fe},{tag:[m.heading5,m.heading6],color:Fe},{tag:[m.atom,m.bool,m.special(m.variableName)],color:Kt},{tag:[m.processingInstruction,m.inserted],color:Ts},{tag:[m.contentSeparator],color:Os},{tag:m.invalid,color:Il,borderBottom:`1px dotted ${Fl}`}]),bg=[mg,jr(gg)];function Wl(n){let e=Object.keys(n).join(""),t=/\w/.test(e);return t&&(e=e.replace(/\w/g,"")),`[${t?"\\w":""}${e.replace(/[^\w\s]/g,"\\$&")}]`}function yg(n){let e=Object.create(null),t=Object.create(null);for(let{label:s}of n){e[s[0]]=!0;for(let r=1;rtypeof s=="string"?{label:s}:s),[t,i]=e.every(s=>/^\w+$/.test(s.label))?[/\w*$/,/\w+$/]:yg(e);return s=>{let r=s.matchBefore(i);return r||s.explicit?{from:r?r.from:s.pos,options:e,validFor:t}:null}}function mb(n,e){return t=>{for(let i=pe(t.state).resolveInner(t.pos,-1);i;i=i.parent)if(n.indexOf(i.name)>-1)return null;return e(t)}}class zl{constructor(e,t,i){this.completion=e,this.source=t,this.match=i}}function Ar(n){return n.selection.main.head}function kg(n,e,t,i){return Object.assign(Object.assign({},n.changeByRange(s=>{if(s==n.selection.main)return{changes:{from:t,to:i,insert:e},range:k.cursor(t+e.length)};let r=i-t;return!s.empty||r&&n.sliceDoc(s.from-r,s.from)!=n.sliceDoc(t,i)?{range:s}:{changes:{from:s.from-r,to:s.from,insert:e},range:k.cursor(s.from-r+e.length)}})),{userEvent:"input.complete"})}function Vc(n,e){const t=e.completion.apply||e.completion.label;let i=e.source;typeof t=="string"?n.dispatch(kg(n.state,t,i.from,i.to)):t(n,e.completion,i.from,i.to)}const ql=new WeakMap;function vg(n){if(!Array.isArray(n))return n;let e=ql.get(n);return e||ql.set(n,e=wg(n)),e}class xg{constructor(e){this.pattern=e,this.chars=[],this.folded=[],this.any=[],this.precise=[],this.byWord=[];for(let t=0;t=48&&A<=57||A>=97&&A<=122?2:A>=65&&A<=90?1:0:(D=ya(A))!=D.toLowerCase()?1:D!=D.toUpperCase()?2:0;(!y||v==1&&b||C==0&&v!=0)&&(t[f]==A||i[f]==A&&(u=!0)?o[f++]=y:o.length&&(w=!1)),C=v,y+=Ee(A)}return 
f==a&&o[0]==0&&w?this.result(-100+(u?-200:0),o,e):d==a&&p==0?[-200-e.length,0,g]:l>-1?[-700-e.length,l,l+this.pattern.length]:d==a?[-200+-700-e.length,p,g]:f==a?this.result(-100+(u?-200:0)+-700+(w?0:-1100),o,e):t.length==2?null:this.result((s[0]?-700:0)+-200+-1100,s,e)}result(e,t,i){let s=[e-i.length],r=1;for(let o of t){let l=o+(this.astral?Ee(ge(i,o)):1);r>1&&s[r-1]==o?s[r-1]=l:(s[r++]=o,s[r++]=l)}return s}}const Ft=O.define({combine(n){return Wt(n,{activateOnTyping:!0,selectOnOpen:!0,override:null,closeOnBlur:!0,maxRenderedOptions:100,defaultKeymap:!0,optionClass:()=>"",aboveCursor:!1,icons:!0,addToOptions:[],compareCompletions:(e,t)=>e.label.localeCompare(t.label),interactionDelay:75},{defaultKeymap:(e,t)=>e&&t,closeOnBlur:(e,t)=>e&&t,icons:(e,t)=>e&&t,optionClass:(e,t)=>i=>Sg(e(i),t(i)),addToOptions:(e,t)=>e.concat(t)})}});function Sg(n,e){return n?e?n+" "+e:n:e}function Cg(n){let e=n.addToOptions.slice();return n.icons&&e.push({render(t){let i=document.createElement("div");return i.classList.add("cm-completionIcon"),t.type&&i.classList.add(...t.type.split(/\s+/g).map(s=>"cm-completionIcon-"+s)),i.setAttribute("aria-hidden","true"),i},position:20}),e.push({render(t,i,s){let r=document.createElement("span");r.className="cm-completionLabel";let{label:o}=t,l=0;for(let a=1;al&&r.appendChild(document.createTextNode(o.slice(l,h)));let f=r.appendChild(document.createElement("span"));f.appendChild(document.createTextNode(o.slice(h,c))),f.className="cm-completionMatchedText",l=c}return lt.position-i.position).map(t=>t.render)}function jl(n,e,t){if(n<=t)return{from:0,to:n};if(e<0&&(e=0),e<=n>>1){let s=Math.floor(e/t);return{from:s*t,to:(s+1)*t}}let i=Math.floor((n-e)/t);return{from:n-(i+1)*t,to:n-i*t}}class Ag{constructor(e,t){this.view=e,this.stateField=t,this.info=null,this.placeInfo={read:()=>this.measureInfo(),write:l=>this.positionInfo(l),key:this};let i=e.state.field(t),{options:s,selected:r}=i.open,o=e.state.facet(Ft);this.optionContent=Cg(o),this.optionClass=o.optionClass,this.range=jl(s.length,r,o.maxRenderedOptions),this.dom=document.createElement("div"),this.dom.className="cm-tooltip-autocomplete",this.dom.addEventListener("mousedown",l=>{for(let a=l.target,h;a&&a!=this.dom;a=a.parentNode)if(a.nodeName=="LI"&&(h=/-(\d+)$/.exec(a.id))&&+h[1]{this.info&&this.view.requestMeasure(this.placeInfo)})}mount(){this.updateSel()}update(e){e.state.field(this.stateField)!=e.startState.field(this.stateField)&&this.updateSel()}positioned(){this.info&&this.view.requestMeasure(this.placeInfo)}updateSel(){let e=this.view.state.field(this.stateField),t=e.open;if((t.selected>-1&&t.selected=this.range.to)&&(this.range=jl(t.options.length,t.selected,this.view.state.facet(Ft).maxRenderedOptions),this.list.remove(),this.list=this.dom.appendChild(this.createListBox(t.options,e.id,this.range)),this.list.addEventListener("scroll",()=>{this.info&&this.view.requestMeasure(this.placeInfo)})),this.updateSelectedOption(t.selected)){this.info&&(this.info.remove(),this.info=null);let{completion:i}=t.options[t.selected],{info:s}=i;if(!s)return;let r=typeof s=="string"?document.createTextNode(s):s(i);if(!r)return;"then"in r?r.then(o=>{o&&this.view.state.field(this.stateField,!1)==e&&this.addInfoPane(o)}).catch(o=>He(this.view.state,o,"completion info")):this.addInfoPane(r)}}addInfoPane(e){let t=this.info=document.createElement("div");t.className="cm-tooltip cm-completionInfo",t.appendChild(e),this.dom.appendChild(t),this.view.requestMeasure(this.placeInfo)}updateSelectedOption(e){let t=null;for(let 
i=this.list.firstChild,s=this.range.from;i;i=i.nextSibling,s++)s==e?i.hasAttribute("aria-selected")||(i.setAttribute("aria-selected","true"),t=i):i.hasAttribute("aria-selected")&&i.removeAttribute("aria-selected");return t&&Dg(this.list,t),t}measureInfo(){let e=this.dom.querySelector("[aria-selected]");if(!e||!this.info)return null;let t=this.dom.ownerDocument.defaultView||window,i=this.dom.getBoundingClientRect(),s=this.info.getBoundingClientRect(),r=e.getBoundingClientRect();if(r.top>Math.min(t.innerHeight,i.bottom)-10||r.bottom=s.height||p>i.top?c=r.bottom-i.top+"px":f=i.bottom-r.top+"px"}return{top:c,bottom:f,maxWidth:h,class:a?o?"left-narrow":"right-narrow":l?"left":"right"}}positionInfo(e){this.info&&(e?(this.info.style.top=e.top,this.info.style.bottom=e.bottom,this.info.style.maxWidth=e.maxWidth,this.info.className="cm-tooltip cm-completionInfo cm-completionInfo-"+e.class):this.info.style.top="-1e6px")}createListBox(e,t,i){const s=document.createElement("ul");s.id=t,s.setAttribute("role","listbox"),s.setAttribute("aria-expanded","true"),s.setAttribute("aria-label",this.view.state.phrase("Completions"));for(let r=i.from;rnew Ag(e,n)}function Dg(n,e){let t=n.getBoundingClientRect(),i=e.getBoundingClientRect();i.topt.bottom&&(n.scrollTop+=i.bottom-t.bottom)}function Kl(n){return(n.boost||0)*100+(n.apply?10:0)+(n.info?5:0)+(n.type?1:0)}function Tg(n,e){let t=[],i=0;for(let l of n)if(l.hasResult())if(l.result.filter===!1){let a=l.result.getMatch;for(let h of l.result.options){let c=[1e9-i++];if(a)for(let f of a(h))c.push(f);t.push(new zl(h,l,c))}}else{let a=new xg(e.sliceDoc(l.from,l.to)),h;for(let c of l.result.options)(h=a.match(c.label))&&(c.boost!=null&&(h[0]+=c.boost),t.push(new zl(c,l,h)))}let s=[],r=null,o=e.facet(Ft).compareCompletions;for(let l of t.sort((a,h)=>h.match[0]-a.match[0]||o(a.completion,h.completion)))!r||r.label!=l.completion.label||r.detail!=l.completion.detail||r.type!=null&&l.completion.type!=null&&r.type!=l.completion.type||r.apply!=l.completion.apply?s.push(l):Kl(l.completion)>Kl(r)&&(s[s.length-1]=l),r=l.completion;return s}class Oi{constructor(e,t,i,s,r){this.options=e,this.attrs=t,this.tooltip=i,this.timestamp=s,this.selected=r}setSelected(e,t){return e==this.selected||e>=this.options.length?this:new Oi(this.options,Ul(t,e),this.tooltip,this.timestamp,e)}static build(e,t,i,s,r){let o=Tg(e,t);if(!o.length)return null;let l=t.facet(Ft).selectOnOpen?0:-1;if(s&&s.selected!=l&&s.selected!=-1){let a=s.options[s.selected].completion;for(let h=0;hh.hasResult()?Math.min(a,h.from):a,1e8),create:Mg(Ki),above:r.aboveCursor},s?s.timestamp:Date.now(),l)}map(e){return new Oi(this.options,this.attrs,Object.assign(Object.assign({},this.tooltip),{pos:e.mapPos(this.tooltip.pos)}),this.timestamp,this.selected)}}class Hn{constructor(e,t,i){this.active=e,this.id=t,this.open=i}static start(){return new Hn(Pg,"cm-ac-"+Math.floor(Math.random()*2e6).toString(36),null)}update(e){let{state:t}=e,i=t.facet(Ft),r=(i.override||t.languageDataAt("autocomplete",Ar(t)).map(vg)).map(l=>(this.active.find(h=>h.source==l)||new st(l,this.active.some(h=>h.state!=0)?1:0)).update(e,i));r.length==this.active.length&&r.every((l,a)=>l==this.active[a])&&(r=this.active);let o=e.selection||r.some(l=>l.hasResult()&&e.changes.touchesRange(l.from,l.to))||!Og(r,this.active)?Oi.build(r,t,this.id,this.open,i):this.open&&e.docChanged?this.open.map(e.changes):this.open;!o&&r.every(l=>l.state!=1)&&r.some(l=>l.hasResult())&&(r=r.map(l=>l.hasResult()?new st(l.source,0):l));for(let l of 
e.effects)l.is(Wc)&&(o=o&&o.setSelected(l.value,this.id));return r==this.active&&o==this.open?this:new Hn(r,this.id,o)}get tooltip(){return this.open?this.open.tooltip:null}get attrs(){return this.open?this.open.attrs:Bg}}function Og(n,e){if(n==e)return!0;for(let t=0,i=0;;){for(;t-1&&(t["aria-activedescendant"]=n+"-"+e),t}const Pg=[];function Eg(n){return n.isUserEvent("input.type")?"input":n.isUserEvent("delete.backward")?"delete":null}class st{constructor(e,t,i=-1){this.source=e,this.state=t,this.explicitPos=i}hasResult(){return!1}update(e,t){let i=Eg(e),s=this;i?s=s.handleUserEvent(e,i,t):e.docChanged?s=s.handleChange(e):e.selection&&s.state!=0&&(s=new st(s.source,0));for(let r of e.effects)if(r.is(Fc))s=new st(s.source,1,r.value?Ar(e.state):-1);else if(r.is(Hc))s=new st(s.source,0);else if(r.is(Rg))for(let o of r.value)o.source==s.source&&(s=o);return s}handleUserEvent(e,t,i){return t=="delete"||!i.activateOnTyping?this.map(e.changes):new st(this.source,1)}handleChange(e){return e.changes.touchesRange(Ar(e.startState))?new st(this.source,0):this.map(e.changes)}map(e){return e.empty||this.explicitPos<0?this:new st(this.source,this.state,e.mapPos(this.explicitPos))}}const Fc=R.define(),Hc=R.define(),Rg=R.define({map(n,e){return n.map(t=>t.map(e))}}),Wc=R.define(),Ki=Me.define({create(){return Hn.start()},update(n,e){return n.update(e)},provide:n=>[_r.from(n,e=>e.tooltip),B.contentAttributes.from(n,e=>e.attrs)]});function pn(n,e="option"){return t=>{let i=t.state.field(Ki,!1);if(!i||!i.open||Date.now()-i.open.timestamp-1?i.open.selected+s*(n?1:-1):n?0:o-1;return l<0?l=e=="page"?0:o-1:l>=o&&(l=e=="page"?o-1:0),t.dispatch({effects:Wc.of(l)}),!0}}const Lg=n=>{let e=n.state.field(Ki,!1);return n.state.readOnly||!e||!e.open||e.open.selected<0||Date.now()-e.open.timestampn.state.field(Ki,!1)?(n.dispatch({effects:Fc.of(!0)}),!0):!1,_g=n=>{let e=n.state.field(Ki,!1);return!e||!e.active.some(t=>t.state!=0)?!1:(n.dispatch({effects:Hc.of(null)}),!0)},Ng=B.baseTheme({".cm-tooltip.cm-tooltip-autocomplete":{"& > ul":{fontFamily:"monospace",whiteSpace:"nowrap",overflow:"hidden auto",maxWidth_fallback:"700px",maxWidth:"min(700px, 95vw)",minWidth:"250px",maxHeight:"10em",listStyle:"none",margin:0,padding:0,"& > li":{overflowX:"hidden",textOverflow:"ellipsis",cursor:"pointer",padding:"1px 3px",lineHeight:1.2}}},"&light .cm-tooltip-autocomplete ul li[aria-selected]":{background:"#17c",color:"white"},"&dark .cm-tooltip-autocomplete ul li[aria-selected]":{background:"#347",color:"white"},".cm-completionListIncompleteTop:before, .cm-completionListIncompleteBottom:after":{content:'"···"',opacity:.5,display:"block",textAlign:"center"},".cm-tooltip.cm-completionInfo":{position:"absolute",padding:"3px 9px",width:"max-content",maxWidth:"400px",boxSizing:"border-box"},".cm-completionInfo.cm-completionInfo-left":{right:"100%"},".cm-completionInfo.cm-completionInfo-right":{left:"100%"},".cm-completionInfo.cm-completionInfo-left-narrow":{right:"30px"},".cm-completionInfo.cm-completionInfo-right-narrow":{left:"30px"},"&light .cm-snippetField":{backgroundColor:"#00000022"},"&dark .cm-snippetField":{backgroundColor:"#ffffff22"},".cm-snippetFieldPosition":{verticalAlign:"text-top",width:0,height:"1.15em",display:"inline-block",margin:"0 -0.7px -.7em",borderLeft:"1.4px dotted 
#888"},".cm-completionMatchedText":{textDecoration:"underline"},".cm-completionDetail":{marginLeft:"0.5em",fontStyle:"italic"},".cm-completionIcon":{fontSize:"90%",width:".8em",display:"inline-block",textAlign:"center",paddingRight:".6em",opacity:"0.6"},".cm-completionIcon-function, .cm-completionIcon-method":{"&:after":{content:"'ƒ'"}},".cm-completionIcon-class":{"&:after":{content:"'○'"}},".cm-completionIcon-interface":{"&:after":{content:"'◌'"}},".cm-completionIcon-variable":{"&:after":{content:"'𝑥'"}},".cm-completionIcon-constant":{"&:after":{content:"'𝐶'"}},".cm-completionIcon-type":{"&:after":{content:"'𝑡'"}},".cm-completionIcon-enum":{"&:after":{content:"'∪'"}},".cm-completionIcon-property":{"&:after":{content:"'□'"}},".cm-completionIcon-keyword":{"&:after":{content:"'🔑︎'"}},".cm-completionIcon-namespace":{"&:after":{content:"'▢'"}},".cm-completionIcon-text":{"&:after":{content:"'abc'",fontSize:"50%",verticalAlign:"middle"}}});class Vg{constructor(e,t,i,s){this.field=e,this.line=t,this.from=i,this.to=s}}class Qr{constructor(e,t,i){this.field=e,this.from=t,this.to=i}map(e){let t=e.mapPos(this.from,-1,de.TrackDel),i=e.mapPos(this.to,1,de.TrackDel);return t==null||i==null?null:new Qr(this.field,t,i)}}class $r{constructor(e,t){this.lines=e,this.fieldPositions=t}instantiate(e,t){let i=[],s=[t],r=e.doc.lineAt(t),o=/^\s*/.exec(r.text)[0];for(let a of this.lines){if(i.length){let h=o,c=/^\t*/.exec(a)[0].length;for(let f=0;fnew Qr(a.field,s[a.line]+a.from,s[a.line]+a.to));return{text:i,ranges:l}}static parse(e){let t=[],i=[],s=[],r;for(let o of e.split(/\r\n?|\n/)){for(;r=/[#$]\{(?:(\d+)(?::([^}]*))?|([^}]*))\}/.exec(o);){let l=r[1]?+r[1]:null,a=r[2]||r[3]||"",h=-1;for(let c=0;c=h&&f.field++}s.push(new Vg(h,i.length,r.index,r.index+a.length)),o=o.slice(0,r.index)+a+o.slice(r.index+r[0].length)}for(let l;l=/([$#])\\{/.exec(o);){o=o.slice(0,l.index)+l[1]+"{"+o.slice(l.index+l[0].length);for(let a of s)a.line==i.length&&a.from>l.index&&(a.from--,a.to--)}i.push(o)}return new $r(i,s)}}let Fg=E.widget({widget:new class extends tt{toDOM(){let n=document.createElement("span");return n.className="cm-snippetFieldPosition",n}ignoreEvent(){return!1}}}),Hg=E.mark({class:"cm-snippetField"});class ui{constructor(e,t){this.ranges=e,this.active=t,this.deco=E.set(e.map(i=>(i.from==i.to?Fg:Hg).range(i.from,i.to)))}map(e){let t=[];for(let i of this.ranges){let s=i.map(e);if(!s)return null;t.push(s)}return new ui(t,this.active)}selectionInsideField(e){return e.ranges.every(t=>this.ranges.some(i=>i.field==this.active&&i.from<=t.from&&i.to>=t.to))}}const Ui=R.define({map(n,e){return n&&n.map(e)}}),Wg=R.define(),Fi=Me.define({create(){return null},update(n,e){for(let t of e.effects){if(t.is(Ui))return t.value;if(t.is(Wg)&&n)return new ui(n.ranges,t.value)}return n&&e.docChanged&&(n=n.map(e.changes)),n&&e.selection&&!n.selectionInsideField(e.selection)&&(n=null),n},provide:n=>B.decorations.from(n,e=>e?e.deco:E.none)});function eo(n,e){return k.create(n.filter(t=>t.field==e).map(t=>k.range(t.from,t.to)))}function zg(n){let e=$r.parse(n);return(t,i,s,r)=>{let{text:o,ranges:l}=e.instantiate(t.state,s),a={changes:{from:s,to:r,insert:N.of(o)},scrollIntoView:!0};if(l.length&&(a.selection=eo(l,0)),l.length>1){let h=new ui(l,0),c=a.effects=[Ui.of(h)];t.state.field(Fi,!1)===void 0&&c.push(R.appendConfig.of([Fi,Gg,Jg,Ng]))}t.dispatch(t.state.update(a))}}function zc(n){return({state:e,dispatch:t})=>{let i=e.field(Fi,!1);if(!i||n<0&&i.active==0)return!1;let s=i.active+n,r=n>0&&!i.ranges.some(o=>o.field==s+n);return 
t(e.update({selection:eo(i.ranges,s),effects:Ui.of(r?null:new ui(i.ranges,s))})),!0}}const qg=({state:n,dispatch:e})=>n.field(Fi,!1)?(e(n.update({effects:Ui.of(null)})),!0):!1,jg=zc(1),Kg=zc(-1),Ug=[{key:"Tab",run:jg,shift:Kg},{key:"Escape",run:qg}],Gl=O.define({combine(n){return n.length?n[0]:Ug}}),Gg=Wi.highest(Gn.compute([Gl],n=>n.facet(Gl)));function gb(n,e){return Object.assign(Object.assign({},e),{apply:zg(n)})}const Jg=B.domEventHandlers({mousedown(n,e){let t=e.state.field(Fi,!1),i;if(!t||(i=e.posAtCoords({x:n.clientX,y:n.clientY}))==null)return!1;let s=t.ranges.find(r=>r.from<=i&&r.to>=i);return!s||s.field==t.active?!1:(e.dispatch({selection:eo(t.ranges,s.field),effects:Ui.of(t.ranges.some(r=>r.field>s.field)?new ui(t.ranges,s.field):null)}),!0)}}),Hi={brackets:["(","[","{","'",'"'],before:")]}:;>",stringPrefixes:[]},Pt=R.define({map(n,e){let t=e.mapPos(n,-1,de.TrackAfter);return t??void 0}}),to=R.define({map(n,e){return e.mapPos(n)}}),io=new class extends Lt{};io.startSide=1;io.endSide=-1;const qc=Me.define({create(){return H.empty},update(n,e){if(e.selection){let t=e.state.doc.lineAt(e.selection.main.head).from,i=e.startState.doc.lineAt(e.startState.selection.main.head).from;t!=e.changes.mapPos(i,-1)&&(n=H.empty)}n=n.map(e.changes);for(let t of e.effects)t.is(Pt)?n=n.update({add:[io.range(t.value,t.value+1)]}):t.is(to)&&(n=n.update({filter:i=>i!=t.value}));return n}});function Yg(){return[Zg,qc]}const Es="()[]{}<>";function jc(n){for(let e=0;e{if((Xg?n.composing:n.compositionStarted)||n.state.readOnly)return!1;let s=n.state.selection.main;if(i.length>2||i.length==2&&Ee(ge(i,0))==1||e!=s.from||t!=s.to)return!1;let r=e0(n.state,i);return r?(n.dispatch(r),!0):!1}),Qg=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let i=Kc(n,n.selection.main.head).brackets||Hi.brackets,s=null,r=n.changeByRange(o=>{if(o.empty){let l=t0(n.doc,o.head);for(let a of i)if(a==l&&is(n.doc,o.head)==jc(ge(a,0)))return{changes:{from:o.head-a.length,to:o.head+a.length},range:k.cursor(o.head-a.length)}}return{range:s=o}});return s||e(n.update(r,{scrollIntoView:!0,userEvent:"delete.backward"})),!s},$g=[{key:"Backspace",run:Qg}];function e0(n,e){let t=Kc(n,n.selection.main.head),i=t.brackets||Hi.brackets;for(let s of i){let r=jc(ge(s,0));if(e==s)return r==s?s0(n,s,i.indexOf(s+s+s)>-1,t):i0(n,s,r,t.before||Hi.before);if(e==r&&Uc(n,n.selection.main.from))return n0(n,s,r)}return null}function Uc(n,e){let t=!1;return n.field(qc).between(0,n.doc.length,i=>{i==e&&(t=!0)}),t}function is(n,e){let t=n.sliceString(e,e+2);return t.slice(0,Ee(ge(t,0)))}function t0(n,e){let t=n.sliceString(e-2,e);return Ee(ge(t,0))==t.length?t:t.slice(1)}function i0(n,e,t,i){let s=null,r=n.changeByRange(o=>{if(!o.empty)return{changes:[{insert:e,from:o.from},{insert:t,from:o.to}],effects:Pt.of(o.to+e.length),range:k.range(o.anchor+e.length,o.head+e.length)};let l=is(n.doc,o.head);return!l||/\s/.test(l)||i.indexOf(l)>-1?{changes:{insert:e+t,from:o.head},effects:Pt.of(o.head+e.length),range:k.cursor(o.head+e.length)}:{range:s=o}});return s?null:n.update(r,{scrollIntoView:!0,userEvent:"input.type"})}function n0(n,e,t){let i=null,s=n.selection.ranges.map(r=>r.empty&&is(n.doc,r.head)==t?k.cursor(r.head+t.length):i=r);return i?null:n.update({selection:k.create(s,n.selection.mainIndex),scrollIntoView:!0,effects:n.selection.ranges.map(({from:r})=>to.of(r))})}function s0(n,e,t,i){let 
s=i.stringPrefixes||Hi.stringPrefixes,r=null,o=n.changeByRange(l=>{if(!l.empty)return{changes:[{insert:e,from:l.from},{insert:e,from:l.to}],effects:Pt.of(l.to+e.length),range:k.range(l.anchor+e.length,l.head+e.length)};let a=l.head,h=is(n.doc,a),c;if(h==e){if(Jl(n,a))return{changes:{insert:e+e,from:a},effects:Pt.of(a+e.length),range:k.cursor(a+e.length)};if(Uc(n,a)){let f=t&&n.sliceDoc(a,a+e.length*3)==e+e+e;return{range:k.cursor(a+e.length*(f?3:1)),effects:to.of(a)}}}else{if(t&&n.sliceDoc(a-2*e.length,a)==e+e&&(c=Yl(n,a-2*e.length,s))>-1&&Jl(n,c))return{changes:{insert:e+e+e+e,from:a},effects:Pt.of(a+e.length),range:k.cursor(a+e.length)};if(n.charCategorizer(a)(h)!=Re.Word&&Yl(n,a,s)>-1&&!r0(n,a,e,s))return{changes:{insert:e+e,from:a},effects:Pt.of(a+e.length),range:k.cursor(a+e.length)}}return{range:r=l}});return r?null:n.update(o,{scrollIntoView:!0,userEvent:"input.type"})}function Jl(n,e){let t=pe(n).resolveInner(e+1);return t.parent&&t.from==e}function r0(n,e,t,i){let s=pe(n).resolveInner(e,-1),r=i.reduce((o,l)=>Math.max(o,l.length),0);for(let o=0;o<5;o++){let l=n.sliceDoc(s.from,Math.min(s.to,s.from+t.length+r)),a=l.indexOf(t);if(!a||a>-1&&i.indexOf(l.slice(0,a))>-1){let c=s.firstChild;for(;c&&c.from==s.from&&c.to-c.from>t.length+a;){if(n.sliceDoc(c.to-t.length,c.to)==t)return!1;c=c.firstChild}return!0}let h=s.to==e&&s.parent;if(!h)break;s=h}return!1}function Yl(n,e,t){let i=n.charCategorizer(e);if(i(n.sliceDoc(e-1,e))!=Re.Word)return e;for(let s of t){let r=e-s.length;if(n.sliceDoc(r,e)==s&&i(n.sliceDoc(r-1,r))!=Re.Word)return r}return-1}const o0=[{key:"Ctrl-Space",run:Ig},{key:"Escape",run:_g},{key:"ArrowDown",run:pn(!0)},{key:"ArrowUp",run:pn(!1)},{key:"PageDown",run:pn(!0,"page")},{key:"PageUp",run:pn(!1,"page")},{key:"Enter",run:Lg}];function Ye(){var n=arguments[0];typeof n=="string"&&(n=document.createElement(n));var e=1,t=arguments[1];if(t&&typeof t=="object"&&t.nodeType==null&&!Array.isArray(t)){for(var i in t)if(Object.prototype.hasOwnProperty.call(t,i)){var s=t[i];typeof s=="string"?n.setAttribute(i,s):s!=null&&(n[i]=s)}e++}for(;el.from==l.to||l.from==l.to-1&&i.doc.lineAt(l.from).to==l.from?E.widget({widget:new b0(l),diagnostic:l}).range(l.from):E.mark({attributes:{class:"cm-lintRange cm-lintRange-"+l.severity},diagnostic:l}).range(l.from,l.to)),!0);return new Tt(o,t,hi(o))}}function hi(n,e=null,t=0){let i=null;return n.between(t,1e9,(s,r,{spec:o})=>{if(!(e&&o.diagnostic!=e))return i=new l0(s,r,o.diagnostic),!1}),i}function a0(n,e){return!!(n.effects.some(t=>t.is(no))||n.changes.touchesRange(e.pos))}function Jc(n,e){return n.field(Be,!1)?e:e.concat(R.appendConfig.of([Be,B.decorations.compute([Be],t=>{let{selected:i,panel:s}=t.field(Be);return!i||!s||i.from==i.to?E.none:E.set([c0.range(i.from,i.to)])}),Cd(f0,{hideOn:a0}),w0]))}function h0(n,e){return{effects:Jc(n,[no.of(e)])}}const no=R.define(),so=R.define(),Yc=R.define(),Be=Me.define({create(){return new Tt(E.none,null,null)},update(n,e){if(e.docChanged){let t=n.diagnostics.map(e.changes),i=null;if(n.selected){let s=e.changes.mapPos(n.selected.from,1);i=hi(t,n.selected.diagnostic,s)||hi(t,null,s)}n=new Tt(t,n.panel,i)}for(let t of e.effects)t.is(no)?n=Tt.init(t.value,n.panel,e.state):t.is(so)?n=new Tt(n.diagnostics,t.value?ns.open:null,n.selected):t.is(Yc)&&(n=new Tt(n.diagnostics,n.panel,t.value));return n},provide:n=>[ur.from(n,e=>e.panel),B.decorations.from(n,e=>e.diagnostics)]}),c0=E.mark({class:"cm-lintRange cm-lintRange-active"});function 
f0(n,e,t){let{diagnostics:i}=n.state.field(Be),s=[],r=2e8,o=0;i.between(e-(t<0?1:0),e+(t>0?1:0),(a,h,{spec:c})=>{e>=a&&e<=h&&(a==h||(e>a||t>0)&&(eZc(n,t,!1)))}const d0=n=>{let e=n.state.field(Be,!1);(!e||!e.panel)&&n.dispatch({effects:Jc(n.state,[so.of(!0)])});let t=Dd(n,ns.open);return t&&t.dom.querySelector(".cm-panel-lint ul").focus(),!0},Xl=n=>{let e=n.state.field(Be,!1);return!e||!e.panel?!1:(n.dispatch({effects:so.of(!1)}),!0)},p0=n=>{let e=n.state.field(Be,!1);if(!e)return!1;let t=n.state.selection.main,i=e.diagnostics.iter(t.to+1);return!i.value&&(i=e.diagnostics.iter(0),!i.value||i.from==t.from&&i.to==t.to)?!1:(n.dispatch({selection:{anchor:i.from,head:i.to},scrollIntoView:!0}),!0)},m0=[{key:"Mod-Shift-m",run:d0},{key:"F8",run:p0}],g0=ye.fromClass(class{constructor(n){this.view=n,this.timeout=-1,this.set=!0;let{delay:e}=n.state.facet(Yt);this.lintTime=Date.now()+e,this.run=this.run.bind(this),this.timeout=setTimeout(this.run,e)}run(){let n=Date.now();if(nPromise.resolve(i(this.view)))).then(i=>{let s=i.reduce((r,o)=>r.concat(o));this.view.state.doc==e.doc&&this.view.dispatch(h0(this.view.state,s))},i=>{He(this.view.state,i)})}}update(n){let e=n.state.facet(Yt);(n.docChanged||e!=n.startState.facet(Yt))&&(this.lintTime=Date.now()+e.delay,this.set||(this.set=!0,this.timeout=setTimeout(this.run,e.delay)))}force(){this.set&&(this.lintTime=Date.now(),this.run())}destroy(){clearTimeout(this.timeout)}}),Yt=O.define({combine(n){return Object.assign({sources:n.map(e=>e.source)},Wt(n.map(e=>e.config),{delay:750,markerFilter:null,tooltipFilter:null}))},enables:g0});function Xc(n){let e=[];if(n)e:for(let{name:t}of n){for(let i=0;ir.toLowerCase()==s.toLowerCase())){e.push(s);continue e}}e.push("")}return e}function Zc(n,e,t){var i;let s=t?Xc(e.actions):[];return Ye("li",{class:"cm-diagnostic cm-diagnostic-"+e.severity},Ye("span",{class:"cm-diagnosticText"},e.renderMessage?e.renderMessage():e.message),(i=e.actions)===null||i===void 0?void 0:i.map((r,o)=>{let l=f=>{f.preventDefault();let u=hi(n.state.field(Be).diagnostics,e);u&&r.apply(n,u.from,u.to)},{name:a}=r,h=s[o]?a.indexOf(s[o]):-1,c=h<0?a:[a.slice(0,h),Ye("u",a.slice(h,h+1)),a.slice(h+1)];return Ye("button",{type:"button",class:"cm-diagnosticAction",onclick:l,onmousedown:l,"aria-label":` Action: ${a}${h<0?"":` (access key "${s[o]})"`}.`},c)}),e.source&&Ye("div",{class:"cm-diagnosticSource"},e.source))}class b0 extends tt{constructor(e){super(),this.diagnostic=e}eq(e){return e.diagnostic==this.diagnostic}toDOM(){return Ye("span",{class:"cm-lintPoint cm-lintPoint-"+this.diagnostic.severity})}}class Zl{constructor(e,t){this.diagnostic=t,this.id="item_"+Math.floor(Math.random()*4294967295).toString(16),this.dom=Zc(e,t,!0),this.dom.id=this.id,this.dom.setAttribute("role","option")}}class ns{constructor(e){this.view=e,this.items=[];let t=s=>{if(s.keyCode==27)Xl(this.view),this.view.focus();else if(s.keyCode==38||s.keyCode==33)this.moveSelection((this.selectedIndex-1+this.items.length)%this.items.length);else if(s.keyCode==40||s.keyCode==34)this.moveSelection((this.selectedIndex+1)%this.items.length);else if(s.keyCode==36)this.moveSelection(0);else if(s.keyCode==35)this.moveSelection(this.items.length-1);else if(s.keyCode==13)this.view.focus();else if(s.keyCode>=65&&s.keyCode<=90&&this.selectedIndex>=0){let{diagnostic:r}=this.items[this.selectedIndex],o=Xc(r.actions);for(let l=0;l{for(let r=0;rXl(this.view)},"×")),this.update()}get selectedIndex(){let e=this.view.state.field(Be).selected;if(!e)return-1;for(let t=0;t{let h=-1,c;for(let 
f=i;fi&&(this.items.splice(i,h-i),s=!0)),t&&c.diagnostic==t.diagnostic?c.dom.hasAttribute("aria-selected")||(c.dom.setAttribute("aria-selected","true"),r=c):c.dom.hasAttribute("aria-selected")&&c.dom.removeAttribute("aria-selected"),i++});i({sel:r.dom.getBoundingClientRect(),panel:this.list.getBoundingClientRect()}),write:({sel:o,panel:l})=>{o.topl.bottom&&(this.list.scrollTop+=o.bottom-l.bottom)}})):this.selectedIndex<0&&this.list.removeAttribute("aria-activedescendant"),s&&this.sync()}sync(){let e=this.list.firstChild;function t(){let i=e;e=i.nextSibling,i.remove()}for(let i of this.items)if(i.dom.parentNode==this.list){for(;e!=i.dom;)t();e=i.dom.nextSibling}else this.list.insertBefore(i.dom,e);for(;e;)t()}moveSelection(e){if(this.selectedIndex<0)return;let t=this.view.state.field(Be),i=hi(t.diagnostics,this.items[e].diagnostic);i&&this.view.dispatch({selection:{anchor:i.from,head:i.to},scrollIntoView:!0,effects:Yc.of(i)})}static open(e){return new ns(e)}}function y0(n,e='viewBox="0 0 40 40"'){return`url('data:image/svg+xml,${encodeURIComponent(n)}')`}function Rs(n){return y0(``,'width="6" height="3"')}const w0=B.baseTheme({".cm-diagnostic":{padding:"3px 6px 3px 8px",marginLeft:"-1px",display:"block",whiteSpace:"pre-wrap"},".cm-diagnostic-error":{borderLeft:"5px solid #d11"},".cm-diagnostic-warning":{borderLeft:"5px solid orange"},".cm-diagnostic-info":{borderLeft:"5px solid #999"},".cm-diagnosticAction":{font:"inherit",border:"none",padding:"2px 4px",backgroundColor:"#444",color:"white",borderRadius:"3px",marginLeft:"8px"},".cm-diagnosticSource":{fontSize:"70%",opacity:.7},".cm-lintRange":{backgroundPosition:"left bottom",backgroundRepeat:"repeat-x",paddingBottom:"0.7px"},".cm-lintRange-error":{backgroundImage:Rs("#d11")},".cm-lintRange-warning":{backgroundImage:Rs("orange")},".cm-lintRange-info":{backgroundImage:Rs("#999")},".cm-lintRange-active":{backgroundColor:"#ffdd9980"},".cm-tooltip-lint":{padding:0,margin:0},".cm-lintPoint":{position:"relative","&:after":{content:'""',position:"absolute",bottom:0,left:"-2px",borderLeft:"3px solid transparent",borderRight:"3px solid transparent",borderBottom:"4px solid #d11"}},".cm-lintPoint-warning":{"&:after":{borderBottomColor:"orange"}},".cm-lintPoint-info":{"&:after":{borderBottomColor:"#999"}},".cm-panel.cm-panel-lint":{position:"relative","& ul":{maxHeight:"100px",overflowY:"auto","& [aria-selected]":{backgroundColor:"#ddd","& u":{textDecoration:"underline"}},"&:focus [aria-selected]":{background_fallback:"#bdf",backgroundColor:"Highlight",color_fallback:"white",color:"HighlightText"},"& u":{textDecoration:"none"},padding:0,margin:0},"& [name=close]":{position:"absolute",top:"0",right:"2px",background:"inherit",border:"none",font:"inherit",padding:0,margin:0}}}),k0=(()=>[Id(),id(),tm(),Tp(),Uu(),_.allowMultipleSelections.of(!0),mp(),jr(Ep,{fallback:!0}),Yg(),dd(),gd(),Gn.of([...$g,...$m,...cm,...Ap,...o0,...m0])])(),Ql={python:()=>Pe(()=>import("./index-0ba90c52.js"),["assets/index-0ba90c52.js","assets/index-6a7e443e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js"]).then(n=>n.python()),markdown:async()=>{const[n,e]=await 
Promise.all([Pe(()=>import("./index-950351c6.js"),["assets/index-950351c6.js","assets/index-f44e277a.js","assets/index-6a7e443e.js","assets/index-30e05911.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/index-aee9714f.js"]),Pe(()=>import("./frontmatter-2e6bc0ff.js"),["assets/frontmatter-2e6bc0ff.js","assets/yaml-95012b83.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js"])]);return n.markdown({extensions:[e.frontmatter]})},json:()=>Pe(()=>import("./index-d18804f7.js"),["assets/index-d18804f7.js","assets/index-6a7e443e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js"]).then(n=>n.json()),html:()=>Pe(()=>import("./index-f44e277a.js"),["assets/index-f44e277a.js","assets/index-6a7e443e.js","assets/index-30e05911.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/index-aee9714f.js"]).then(n=>n.html()),css:()=>Pe(()=>import("./index-30e05911.js"),["assets/index-30e05911.js","assets/index-6a7e443e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js"]).then(n=>n.css()),javascript:()=>Pe(()=>import("./index-aee9714f.js"),["assets/index-aee9714f.js","assets/index-6a7e443e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js"]).then(n=>n.javascript()),typescript:()=>Pe(()=>import("./index-aee9714f.js"),["assets/index-aee9714f.js","assets/index-6a7e443e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js"]).then(n=>n.javascript({typescript:!0})),yaml:()=>Pe(()=>import("./yaml-95012b83.js"),[]).then(n=>Jt.define(n.yaml)),dockerfile:()=>Pe(()=>import("./dockerfile-d67bbd50.js"),[]).then(n=>Jt.define(n.dockerFile)),shell:()=>Pe(()=>import("./shell-86dd1d99.js"),[]).then(n=>Jt.define(n.shell)),r:()=>Pe(()=>import("./r-3ca97919.js"),[]).then(n=>Jt.define(n.r))},v0={py:"python",md:"markdown",js:"javascript",ts:"typescript",sh:"shell"};async function x0(n){const e=Ql[n]||Ql[v0[n]]||void 0;if(e)return e()}function S0(n){let e,t,i;return{c(){e=dt("div"),t=dt("div"),se(t,"class",i="codemirror-wrapper "+n[0]+" svelte-1sc8eck"),se(e,"class","wrap svelte-1sc8eck")},m(s,r){Ce(s,e,r),ei(e,t),n[12](t)},p(s,[r]){r&1&&i!==(i="codemirror-wrapper "+s[0]+" svelte-1sc8eck")&&se(t,"class",i)},i:Ci,o:Ci,d(s){s&&Ae(e),n[12](null)}}}function C0(n){let e=n.dom.querySelectorAll(".cm-gutterElement");if(e.length===0)return null;for(var t=0;t(b=C(),()=>b?.destroy()));function 
$(T){Rt[T?"unshift":"push"](()=>{g=T,t(1,g)})}return n.$$set=T=>{"classNames"in T&&t(0,i=T.classNames),"value"in T&&t(2,s=T.value),"dark_mode"in T&&t(3,r=T.dark_mode),"basic"in T&&t(4,o=T.basic),"language"in T&&t(5,l=T.language),"lines"in T&&t(6,a=T.lines),"extensions"in T&&t(7,h=T.extensions),"useTab"in T&&t(8,c=T.useTab),"readonly"in T&&t(9,f=T.readonly),"placeholder"in T&&t(10,u=T.placeholder)},n.$$.update=()=>{n.$$.dirty&32&&w(l),n.$$.dirty&2048&&G(),n.$$.dirty&4&&y(s),n.$$.dirty&64&&S()},[i,g,s,r,o,l,a,h,c,f,u,p,$]}let Qc=class extends kt{constructor(e){super(),vt(this,e,A0,S0,xt,{classNames:0,value:2,dark_mode:3,basic:4,language:5,lines:6,extensions:7,useTab:8,readonly:9,placeholder:10})}};function $l(n){let e,t,i,s;return t=new fa({}),{c(){e=dt("span"),oe(t.$$.fragment),se(e,"class","check svelte-qi7jcw")},m(r,o){Ce(r,e,o),le(t,e,null),s=!0},i(r){s||(F(t.$$.fragment,r),r&&na(()=>{s&&(i||(i=xn(e,Sn,{},!0)),i.run(1))}),s=!0)},o(r){W(t.$$.fragment,r),r&&(i||(i=xn(e,Sn,{},!1)),i.run(0)),s=!1},d(r){r&&Ae(e),ae(t),r&&i&&i.end()}}}function M0(n){let e,t,i,s,r,o,l;i=new ef({});let a=n[0]&&$l();return{c(){e=dt("button"),t=dt("span"),oe(i.$$.fragment),s=pt(),a&&a.c(),se(t,"class","copy-text"),vn(t,"copied",n[0]),se(e,"title","copy"),se(e,"class","svelte-qi7jcw")},m(h,c){Ce(h,e,c),ei(e,t),le(i,t,null),ei(e,s),a&&a.m(e,null),r=!0,o||(l=ta(e,"click",n[1]),o=!0)},p(h,[c]){(!r||c&1)&&vn(t,"copied",h[0]),h[0]?a?c&1&&F(a,1):(a=$l(),a.c(),F(a,1),a.m(e,null)):a&&(Wn(),W(a,1,1,()=>{a=null}),zn())},i(h){r||(F(i.$$.fragment,h),F(a),r=!0)},o(h){W(i.$$.fragment,h),W(a),r=!1},d(h){h&&Ae(e),ae(i),a&&a.d(),o=!1,l()}}}function D0(n,e,t){let i=!1,{value:s}=e,r;function o(){t(0,i=!0),r&&clearTimeout(r),r=setTimeout(()=>{t(0,i=!1)},2e3)}async function l(){"clipboard"in navigator&&(await navigator.clipboard.writeText(s),o())}return ia(()=>{r&&clearTimeout(r)}),n.$$set=a=>{"value"in a&&t(2,s=a.value)},[i,l,s]}class T0 extends kt{constructor(e){super(),vt(this,e,D0,M0,xt,{value:2})}}function ea(n){let e,t,i,s;return t=new fa({}),{c(){e=dt("span"),oe(t.$$.fragment),se(e,"class","check svelte-14d303a")},m(r,o){Ce(r,e,o),le(t,e,null),s=!0},i(r){s||(F(t.$$.fragment,r),r&&na(()=>{s&&(i||(i=xn(e,Sn,{},!0)),i.run(1))}),s=!0)},o(r){W(t.$$.fragment,r),r&&(i||(i=xn(e,Sn,{},!1)),i.run(0)),s=!1},d(r){r&&Ae(e),ae(t),r&&i&&i.end()}}}function O0(n){let e,t,i,s,r,o,l;t=new tf({});let a=n[0]&&ea();return{c(){e=dt("a"),oe(t.$$.fragment),i=pt(),a&&a.c(),se(e,"download",s="file."+n[2]),se(e,"href",n[1]),se(e,"class","svelte-14d303a"),vn(e,"copied",n[0])},m(h,c){Ce(h,e,c),le(t,e,null),ei(e,i),a&&a.m(e,null),r=!0,o||(l=ta(e,"click",n[3]),o=!0)},p(h,[c]){h[0]?a?c&1&&F(a,1):(a=ea(),a.c(),F(a,1),a.m(e,null)):a&&(Wn(),W(a,1,1,()=>{a=null}),zn()),(!r||c&4&&s!==(s="file."+h[2]))&&se(e,"download",s),(!r||c&2)&&se(e,"href",h[1]),(!r||c&1)&&vn(e,"copied",h[0])},i(h){r||(F(t.$$.fragment,h),F(a),r=!0)},o(h){W(t.$$.fragment,h),W(a),r=!1},d(h){h&&Ae(e),ae(t),a&&a.d(),o=!1,l()}}}function B0(n){return{py:"py",python:"py",md:"md",markdown:"md",json:"json",html:"html",css:"css",js:"js",javascript:"js",ts:"ts",typescript:"ts",yaml:"yaml",yml:"yml",dockerfile:"dockerfile",sh:"sh",shell:"sh",r:"r"}[n]||"txt"}function P0(n,e,t){let i,s,{value:r}=e,{language:o}=e,l=!1,a;function h(){t(0,l=!0),a&&clearTimeout(a),a=setTimeout(()=>{t(0,l=!1)},2e3)}return ia(()=>{a&&clearTimeout(a)}),n.$$set=c=>{"value"in c&&t(4,r=c.value),"language"in c&&t(5,o=c.language)},n.$$.update=()=>{n.$$.dirty&32&&t(2,i=B0(o)),n.$$.dirty&16&&t(1,s=URL.createObjectURL(new 
Blob([r])))},[l,s,i,h,r,o]}class E0 extends kt{constructor(e){super(),vt(this,e,P0,O0,xt,{value:4,language:5})}}function R0(n){let e,t,i,s,r;return t=new E0({props:{value:n[0],language:n[1]}}),s=new T0({props:{value:n[0]}}),{c(){e=dt("div"),oe(t.$$.fragment),i=pt(),oe(s.$$.fragment),se(e,"class","svelte-1yin446")},m(o,l){Ce(o,e,l),le(t,e,null),ei(e,i),le(s,e,null),r=!0},p(o,[l]){const a={};l&1&&(a.value=o[0]),l&2&&(a.language=o[1]),t.$set(a);const h={};l&1&&(h.value=o[0]),s.$set(h)},i(o){r||(F(t.$$.fragment,o),F(s.$$.fragment,o),r=!0)},o(o){W(t.$$.fragment,o),W(s.$$.fragment,o),r=!1},d(o){o&&Ae(e),ae(t),ae(s)}}}function L0(n,e,t){let{value:i}=e,{language:s}=e;return n.$$set=r=>{"value"in r&&t(0,i=r.value),"language"in r&&t(1,s=r.language)},[i,s]}class I0 extends kt{constructor(e){super(),vt(this,e,L0,R0,xt,{value:0,language:1})}}function _0(n){let e,t,i,s,r;e=new I0({props:{language:n[1],value:n[0]}});function o(a){n[12](a)}let l={language:n[1],lines:n[2],dark_mode:n[9],readonly:!0};return n[0]!==void 0&&(l.value=n[0]),i=new Qc({props:l}),Rt.push(()=>ti(i,"value",o)),{c(){oe(e.$$.fragment),t=pt(),oe(i.$$.fragment)},m(a,h){le(e,a,h),Ce(a,t,h),le(i,a,h),r=!0},p(a,h){const c={};h&2&&(c.language=a[1]),h&1&&(c.value=a[0]),e.$set(c);const f={};h&2&&(f.language=a[1]),h&4&&(f.lines=a[2]),!s&&h&1&&(s=!0,f.value=a[0],ii(()=>s=!1)),i.$set(f)},i(a){r||(F(e.$$.fragment,a),F(i.$$.fragment,a),r=!0)},o(a){W(e.$$.fragment,a),W(i.$$.fragment,a),r=!1},d(a){a&&Ae(t),ae(e,a),ae(i,a)}}}function N0(n){let e,t;return e=new nf({props:{unpadded_box:!0,size:"large",$$slots:{default:[V0]},$$scope:{ctx:n}}}),{c(){oe(e.$$.fragment)},m(i,s){le(e,i,s),t=!0},p(i,s){const r={};s&32768&&(r.$$scope={dirty:s,ctx:i}),e.$set(r)},i(i){t||(F(e.$$.fragment,i),t=!0)},o(i){W(e.$$.fragment,i),t=!1},d(i){ae(e,i)}}}function V0(n){let e,t;return e=new Dr({}),{c(){oe(e.$$.fragment)},m(i,s){le(e,i,s),t=!0},i(i){t||(F(e.$$.fragment,i),t=!0)},o(i){W(e.$$.fragment,i),t=!1},d(i){ae(e,i)}}}function F0(n){let e,t,i,s,r,o,l,a;const h=[n[8]];let c={};for(let p=0;p{u[y]=null}),zn(),o=u[r],o?o.p(p,g):(o=u[r]=f[r](p),o.c()),F(o,1),o.m(l.parentNode,l))},i(p){a||(F(e.$$.fragment,p),F(i.$$.fragment,p),F(o),a=!0)},o(p){W(e.$$.fragment,p),W(i.$$.fragment,p),W(o),a=!1},d(p){p&&(Ae(t),Ae(s),Ae(l)),ae(e,p),ae(i,p),u[r].d(p)}}}function H0(n){let e,t;return e=new ca({props:{variant:"solid",padding:!1,elem_id:n[3],elem_classes:n[4],visible:n[5],$$slots:{default:[F0]},$$scope:{ctx:n}}}),{c(){oe(e.$$.fragment)},m(i,s){le(e,i,s),t=!0},p(i,[s]){const r={};s&8&&(r.elem_id=i[3]),s&16&&(r.elem_classes=i[4]),s&32&&(r.visible=i[5]),s&33223&&(r.$$scope={dirty:s,ctx:i}),e.$set(r)},i(i){t||(F(e.$$.fragment,i),t=!0)},o(i){W(e.$$.fragment,i),t=!1},d(i){ae(e,i)}}}function W0(n,e,t){const i=Mr();let{value:s=""}=e,{value_is_output:r=!1}=e,{language:o=""}=e,{lines:l=5}=e,{target:a}=e,{elem_id:h=""}=e,{elem_classes:c=[]}=e,{visible:f=!0}=e,{label:u="Code"}=e,{show_label:d=!0}=e,{loading_status:p}=e,g=a.classList.contains("dark");function b(){i("change",s),r||i("input")}sa(()=>{t(10,r=!1)});function w(y){s=y,t(0,s)}return n.$$set=y=>{"value"in y&&t(0,s=y.value),"value_is_output"in y&&t(10,r=y.value_is_output),"language"in y&&t(1,o=y.language),"lines"in y&&t(2,l=y.lines),"target"in y&&t(11,a=y.target),"elem_id"in y&&t(3,h=y.elem_id),"elem_classes"in y&&t(4,c=y.elem_classes),"visible"in y&&t(5,f=y.visible),"label"in y&&t(6,u=y.label),"show_label"in y&&t(7,d=y.show_label),"loading_status"in 
y&&t(8,p=y.loading_status)},n.$$.update=()=>{n.$$.dirty&1&&b()},[s,o,l,h,c,f,u,d,p,g,r,a,w]}class z0 extends kt{constructor(e){super(),vt(this,e,W0,H0,xt,{value:0,value_is_output:10,language:1,lines:2,target:11,elem_id:3,elem_classes:4,visible:5,label:6,show_label:7,loading_status:8})}}function q0(n){let e,t,i,s,r,o,l;const a=[n[8]];let h={};for(let u=0;uti(r,"value",c)),{c(){oe(e.$$.fragment),t=pt(),oe(i.$$.fragment),s=pt(),oe(r.$$.fragment)},m(u,d){le(e,u,d),Ce(u,t,d),le(i,u,d),Ce(u,s,d),le(r,u,d),l=!0},p(u,d){const p=d&256?aa(a,[ha(u[8])]):{};e.$set(p);const g={};d&128&&(g.show_label=u[7]),d&64&&(g.label=u[6]),i.$set(g);const b={};d&2&&(b.language=u[1]),d&4&&(b.lines=u[2]),!o&&d&1&&(o=!0,b.value=u[0],ii(()=>o=!1)),r.$set(b)},i(u){l||(F(e.$$.fragment,u),F(i.$$.fragment,u),F(r.$$.fragment,u),l=!0)},o(u){W(e.$$.fragment,u),W(i.$$.fragment,u),W(r.$$.fragment,u),l=!1},d(u){u&&(Ae(t),Ae(s)),ae(e,u),ae(i,u),ae(r,u)}}}function j0(n){let e,t;return e=new ca({props:{variant:"solid",padding:!1,elem_id:n[3],elem_classes:n[4],visible:n[5],$$slots:{default:[q0]},$$scope:{ctx:n}}}),{c(){oe(e.$$.fragment)},m(i,s){le(e,i,s),t=!0},p(i,[s]){const r={};s&8&&(r.elem_id=i[3]),s&16&&(r.elem_classes=i[4]),s&32&&(r.visible=i[5]),s&33223&&(r.$$scope={dirty:s,ctx:i}),e.$set(r)},i(i){t||(F(e.$$.fragment,i),t=!0)},o(i){W(e.$$.fragment,i),t=!1},d(i){ae(e,i)}}}function K0(n,e,t){const i=Mr();let{value:s=""}=e,{value_is_output:r=!1}=e,{language:o=""}=e,{lines:l=5}=e,{target:a}=e,{elem_id:h=""}=e,{elem_classes:c=[]}=e,{visible:f=!0}=e,{label:u="Code"}=e,{show_label:d=!0}=e,{loading_status:p}=e,g=a.classList.contains("dark");function b(){i("change",s),r||i("input")}sa(()=>{t(10,r=!1)});function w(y){s=y,t(0,s)}return n.$$set=y=>{"value"in y&&t(0,s=y.value),"value_is_output"in y&&t(10,r=y.value_is_output),"language"in y&&t(1,o=y.language),"lines"in y&&t(2,l=y.lines),"target"in y&&t(11,a=y.target),"elem_id"in y&&t(3,h=y.elem_id),"elem_classes"in y&&t(4,c=y.elem_classes),"visible"in y&&t(5,f=y.visible),"label"in y&&t(6,u=y.label),"show_label"in y&&t(7,d=y.show_label),"loading_status"in y&&t(8,p=y.loading_status)},n.$$.update=()=>{n.$$.dirty&1&&b()},[s,o,l,h,c,f,u,d,p,g,r,a,w]}class U0 extends kt{constructor(e){super(),vt(this,e,K0,j0,xt,{value:0,value_is_output:10,language:1,lines:2,target:11,elem_id:3,elem_classes:4,visible:5,label:6,show_label:7,loading_status:8})}}function G0(n){let e,t,i,s;function r(a){n[16](a)}function o(a){n[17](a)}let l={language:n[2],lines:n[3],target:n[4],elem_id:n[5],elem_classes:n[6],visible:n[7],label:n[9],show_label:n[10],loading_status:n[11]};return n[0]!==void 0&&(l.value=n[0]),n[1]!==void 0&&(l.value_is_output=n[1]),e=new U0({props:l}),Rt.push(()=>ti(e,"value",r)),Rt.push(()=>ti(e,"value_is_output",o)),e.$on("change",n[18]),e.$on("input",n[19]),{c(){oe(e.$$.fragment)},m(a,h){le(e,a,h),s=!0},p(a,h){const c={};h&4&&(c.language=a[2]),h&8&&(c.lines=a[3]),h&16&&(c.target=a[4]),h&32&&(c.elem_id=a[5]),h&64&&(c.elem_classes=a[6]),h&128&&(c.visible=a[7]),h&512&&(c.label=a[9]),h&1024&&(c.show_label=a[10]),h&2048&&(c.loading_status=a[11]),!t&&h&1&&(t=!0,c.value=a[0],ii(()=>t=!1)),!i&&h&2&&(i=!0,c.value_is_output=a[1],ii(()=>i=!1)),e.$set(c)},i(a){s||(F(e.$$.fragment,a),s=!0)},o(a){W(e.$$.fragment,a),s=!1},d(a){ae(e,a)}}}function J0(n){let e,t,i,s;function r(a){n[12](a)}function o(a){n[13](a)}let l={language:n[2],lines:n[3],target:n[4],elem_id:n[5],elem_classes:n[6],visible:n[7],label:n[9],show_label:n[10],loading_status:n[11]};return n[0]!==void 0&&(l.value=n[0]),n[1]!==void 
0&&(l.value_is_output=n[1]),e=new z0({props:l}),Rt.push(()=>ti(e,"value",r)),Rt.push(()=>ti(e,"value_is_output",o)),e.$on("change",n[14]),e.$on("input",n[15]),{c(){oe(e.$$.fragment)},m(a,h){le(e,a,h),s=!0},p(a,h){const c={};h&4&&(c.language=a[2]),h&8&&(c.lines=a[3]),h&16&&(c.target=a[4]),h&32&&(c.elem_id=a[5]),h&64&&(c.elem_classes=a[6]),h&128&&(c.visible=a[7]),h&512&&(c.label=a[9]),h&1024&&(c.show_label=a[10]),h&2048&&(c.loading_status=a[11]),!t&&h&1&&(t=!0,c.value=a[0],ii(()=>t=!1)),!i&&h&2&&(i=!0,c.value_is_output=a[1],ii(()=>i=!1)),e.$set(c)},i(a){s||(F(e.$$.fragment,a),s=!0)},o(a){W(e.$$.fragment,a),s=!1},d(a){ae(e,a)}}}function Y0(n){let e,t,i,s;const r=[J0,G0],o=[];function l(a,h){return a[8]=="static"?0:1}return e=l(n),t=o[e]=r[e](n),{c(){t.c(),i=la()},m(a,h){o[e].m(a,h),Ce(a,i,h),s=!0},p(a,[h]){let c=e;e=l(a),e===c?o[e].p(a,h):(Wn(),W(o[c],1,1,()=>{o[c]=null}),zn(),t=o[e],t?t.p(a,h):(t=o[e]=r[e](a),t.c()),F(t,1),t.m(i.parentNode,i))},i(a){s||(F(t),s=!0)},o(a){W(t),s=!1},d(a){a&&Ae(i),o[e].d(a)}}}function X0(n,e,t){let{value:i=""}=e,{value_is_output:s=!1}=e,{language:r=""}=e,{lines:o=5}=e,{target:l}=e,{elem_id:a=""}=e,{elem_classes:h=[]}=e,{visible:c=!0}=e,{mode:f}=e,{label:u="Code"}=e,{show_label:d=!0}=e,{loading_status:p}=e;function g(v){i=v,t(0,i)}function b(v){s=v,t(1,s)}function w(v){Ji.call(this,n,v)}function y(v){Ji.call(this,n,v)}function S(v){i=v,t(0,i)}function C(v){s=v,t(1,s)}function A(v){Ji.call(this,n,v)}function D(v){Ji.call(this,n,v)}return n.$$set=v=>{"value"in v&&t(0,i=v.value),"value_is_output"in v&&t(1,s=v.value_is_output),"language"in v&&t(2,r=v.language),"lines"in v&&t(3,o=v.lines),"target"in v&&t(4,l=v.target),"elem_id"in v&&t(5,a=v.elem_id),"elem_classes"in v&&t(6,h=v.elem_classes),"visible"in v&&t(7,c=v.visible),"mode"in v&&t(8,f=v.mode),"label"in v&&t(9,u=v.label),"show_label"in v&&t(10,d=v.show_label),"loading_status"in v&&t(11,p=v.loading_status)},[i,s,r,o,l,a,h,c,f,u,d,p,g,b,w,y,S,C,A,D]}class Z0 extends kt{constructor(e){super(),vt(this,e,X0,Y0,xt,{value:0,value_is_output:1,language:2,lines:3,target:4,elem_id:5,elem_classes:6,visible:7,mode:8,label:9,show_label:10,loading_status:11})}}const Q0=Z0,$0=["static","dynamic"],yb=Object.freeze(Object.defineProperty({__proto__:null,Component:Q0,modes:$0},Symbol.toStringTag,{value:"Module"}));export{sp as A,mb as B,wg as C,_d as D,k as E,yb as F,ee as I,gr as L,Vr as N,Ih as P,Jt as S,q as T,pb as a,cb as b,xe as c,fb as d,L as e,bp as f,Ge as g,pe as h,lp as i,Wi as j,Gn as k,Ie as l,Vh as m,Bt as n,gp as o,ab as p,Hh as q,li as r,$d as s,m as t,Ip as u,B as v,db as w,lb as x,gb as y,ub as z}; -//# sourceMappingURL=index-7045bfe3.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Info-7c1e7874.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Info-7c1e7874.js deleted file mode 100644 index 34a61f70afd6993ab3fc19e634ffe636d1fc8ee1..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Info-7c1e7874.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as i,e as r,s as u,a9 as f,m as _,g as c,h as p,ab as m,ac as d,ad as $,w as g,u as v,k as h}from"./index-39fce9e2.js";import"./Button-79f6e3bf.js";function b(n){let s,a;const 
l=n[1].default,e=f(l,n,n[0],null);return{c(){s=_("div"),e&&e.c(),c(s,"class","svelte-e8n7p6")},m(t,o){p(t,s,o),e&&e.m(s,null),a=!0},p(t,[o]){e&&e.p&&(!a||o&1)&&m(e,l,t,t[0],a?$(l,t[0],o,null):d(t[0]),null)},i(t){a||(g(e,t),a=!0)},o(t){v(e,t),a=!1},d(t){t&&h(s),e&&e.d(t)}}}function I(n,s,a){let{$$slots:l={},$$scope:e}=s;return n.$$set=t=>{"$$scope"in t&&a(0,e=t.$$scope)},[e,l]}class q extends i{constructor(s){super(),r(this,s,I,b,u,{})}}export{q as I}; -//# sourceMappingURL=Info-7c1e7874.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/test_resource.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/test_resource.py deleted file mode 100644 index 677110cdcbae0b4a39ac72e82f842585ad816be9..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/test_resource.py +++ /dev/null @@ -1,252 +0,0 @@ -import contextlib -import sys -import unittest -import importlib_resources as resources -import uuid -import pathlib - -from . import data01 -from . import zipdata01, zipdata02 -from . import util -from importlib import import_module -from ._compat import import_helper, os_helper, unlink - - -class ResourceTests: - # Subclasses are expected to set the `data` attribute. - - def test_is_file_exists(self): - target = resources.files(self.data) / 'binary.file' - self.assertTrue(target.is_file()) - - def test_is_file_missing(self): - target = resources.files(self.data) / 'not-a-file' - self.assertFalse(target.is_file()) - - def test_is_dir(self): - target = resources.files(self.data) / 'subdirectory' - self.assertFalse(target.is_file()) - self.assertTrue(target.is_dir()) - - -class ResourceDiskTests(ResourceTests, unittest.TestCase): - def setUp(self): - self.data = data01 - - -class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase): - pass - - -def names(traversable): - return {item.name for item in traversable.iterdir()} - - -class ResourceLoaderTests(unittest.TestCase): - def test_resource_contents(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C'] - ) - self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'}) - - def test_is_file(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] - ) - self.assertTrue(resources.files(package).joinpath('B').is_file()) - - def test_is_dir(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] - ) - self.assertTrue(resources.files(package).joinpath('D').is_dir()) - - def test_resource_missing(self): - package = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] - ) - self.assertFalse(resources.files(package).joinpath('Z').is_file()) - - -class ResourceCornerCaseTests(unittest.TestCase): - def test_package_has_no_reader_fallback(self): - """ - Test odd ball packages which: - # 1. Do not have a ResourceReader as a loader - # 2. Are not on the file system - # 3. Are not in a zip file - """ - module = util.create_package( - file=data01, path=data01.__file__, contents=['A', 'B', 'C'] - ) - # Give the module a dummy loader. - module.__loader__ = object() - # Give the module a dummy origin. 
- module.__file__ = '/path/which/shall/not/be/named' - module.__spec__.loader = module.__loader__ - module.__spec__.origin = module.__file__ - self.assertFalse(resources.files(module).joinpath('A').is_file()) - - -class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase): - ZIP_MODULE = zipdata01 # type: ignore - - def test_is_submodule_resource(self): - submodule = import_module('ziptestdata.subdirectory') - self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file()) - - def test_read_submodule_resource_by_name(self): - self.assertTrue( - resources.files('ziptestdata.subdirectory') - .joinpath('binary.file') - .is_file() - ) - - def test_submodule_contents(self): - submodule = import_module('ziptestdata.subdirectory') - self.assertEqual( - names(resources.files(submodule)), {'__init__.py', 'binary.file'} - ) - - def test_submodule_contents_by_name(self): - self.assertEqual( - names(resources.files('ziptestdata.subdirectory')), - {'__init__.py', 'binary.file'}, - ) - - def test_as_file_directory(self): - with resources.as_file(resources.files('ziptestdata')) as data: - assert data.name == 'ziptestdata' - assert data.is_dir() - assert data.joinpath('subdirectory').is_dir() - assert len(list(data.iterdir())) - assert not data.parent.exists() - - -class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase): - ZIP_MODULE = zipdata02 # type: ignore - - def test_unrelated_contents(self): - """ - Test thata zip with two unrelated subpackages return - distinct resources. Ref python/importlib_resources#44. - """ - self.assertEqual( - names(resources.files('ziptestdata.one')), - {'__init__.py', 'resource1.txt'}, - ) - self.assertEqual( - names(resources.files('ziptestdata.two')), - {'__init__.py', 'resource2.txt'}, - ) - - -@contextlib.contextmanager -def zip_on_path(dir): - data_path = pathlib.Path(zipdata01.__file__) - source_zip_path = data_path.parent.joinpath('ziptestdata.zip') - zip_path = pathlib.Path(dir) / f'{uuid.uuid4()}.zip' - zip_path.write_bytes(source_zip_path.read_bytes()) - sys.path.append(str(zip_path)) - import_module('ziptestdata') - - try: - yield - finally: - with contextlib.suppress(ValueError): - sys.path.remove(str(zip_path)) - - with contextlib.suppress(KeyError): - del sys.path_importer_cache[str(zip_path)] - del sys.modules['ziptestdata'] - - with contextlib.suppress(OSError): - unlink(zip_path) - - -class DeletingZipsTest(unittest.TestCase): - """Having accessed resources in a zip file should not keep an open - reference to the zip. - """ - - def setUp(self): - self.fixtures = contextlib.ExitStack() - self.addCleanup(self.fixtures.close) - - modules = import_helper.modules_setup() - self.addCleanup(import_helper.modules_cleanup, *modules) - - temp_dir = self.fixtures.enter_context(os_helper.temp_dir()) - self.fixtures.enter_context(zip_on_path(temp_dir)) - - def test_iterdir_does_not_keep_open(self): - [item.name for item in resources.files('ziptestdata').iterdir()] - - def test_is_file_does_not_keep_open(self): - resources.files('ziptestdata').joinpath('binary.file').is_file() - - def test_is_file_failure_does_not_keep_open(self): - resources.files('ziptestdata').joinpath('not-present').is_file() - - @unittest.skip("Desired but not supported.") - def test_as_file_does_not_keep_open(self): # pragma: no cover - resources.as_file(resources.files('ziptestdata') / 'binary.file') - - def test_entered_path_does_not_keep_open(self): - """ - Mimic what certifi does on import to make its bundle - available for the process duration. 
- """ - resources.as_file(resources.files('ziptestdata') / 'binary.file').__enter__() - - def test_read_binary_does_not_keep_open(self): - resources.files('ziptestdata').joinpath('binary.file').read_bytes() - - def test_read_text_does_not_keep_open(self): - resources.files('ziptestdata').joinpath('utf-8.file').read_text( - encoding='utf-8' - ) - - -class ResourceFromNamespaceTest01(unittest.TestCase): - site_dir = str(pathlib.Path(__file__).parent) - - @classmethod - def setUpClass(cls): - sys.path.append(cls.site_dir) - - @classmethod - def tearDownClass(cls): - sys.path.remove(cls.site_dir) - - def test_is_submodule_resource(self): - self.assertTrue( - resources.files(import_module('namespacedata01')) - .joinpath('binary.file') - .is_file() - ) - - def test_read_submodule_resource_by_name(self): - self.assertTrue( - resources.files('namespacedata01').joinpath('binary.file').is_file() - ) - - def test_submodule_contents(self): - contents = names(resources.files(import_module('namespacedata01'))) - try: - contents.remove('__pycache__') - except KeyError: - pass - self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'}) - - def test_submodule_contents_by_name(self): - contents = names(resources.files('namespacedata01')) - try: - contents.remove('__pycache__') - except KeyError: - pass - self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'}) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/ddosxd/sydney-inpaint/README.md b/spaces/ddosxd/sydney-inpaint/README.md deleted file mode 100644 index 7cfe70686e7121435eea23bc95f382edc407b913..0000000000000000000000000000000000000000 --- a/spaces/ddosxd/sydney-inpaint/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Sydney Inpaint -emoji: 🔥 -colorFrom: purple -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/run.sh b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/run.sh deleted file mode 100644 index 61af4b4950eb11334e55362e3e3c5e2796979a01..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/run.sh +++ /dev/null @@ -1,2 +0,0 @@ -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r50 -ps -ef | grep "train" | grep -v grep | awk '{print "kill -9 "$2}' | sh diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/modules/keypoint_detector.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/modules/keypoint_detector.py deleted file mode 100644 index 4dc4084cef26a495946fa3adc4eda5847454ffd0..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/modules/keypoint_detector.py +++ /dev/null @@ -1,179 +0,0 @@ -from torch import nn -import torch -import torch.nn.functional as F - -from sad_talker.src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d -from sad_talker.src.facerender.modules.util import KPHourglass, make_coordinate_grid, AntiAliasInterpolation2d, ResBottleneck - - -class KPDetector(nn.Module): - """ - Detecting canonical keypoints. Return keypoint position and jacobian near each keypoint. 
- """ - - def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, reshape_channel, reshape_depth, - num_blocks, temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False): - super(KPDetector, self).__init__() - - self.predictor = KPHourglass(block_expansion, in_features=image_channel, - max_features=max_features, reshape_features=reshape_channel, reshape_depth=reshape_depth, num_blocks=num_blocks) - - # self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=7, padding=3) - self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=3, padding=1) - - if estimate_jacobian: - self.num_jacobian_maps = 1 if single_jacobian_map else num_kp - # self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=7, padding=3) - self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=3, padding=1) - ''' - initial as: - [[1 0 0] - [0 1 0] - [0 0 1]] - ''' - self.jacobian.weight.data.zero_() - self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float)) - else: - self.jacobian = None - - self.temperature = temperature - self.scale_factor = scale_factor - if self.scale_factor != 1: - self.down = AntiAliasInterpolation2d(image_channel, self.scale_factor) - - def gaussian2kp(self, heatmap): - """ - Extract the mean from a heatmap - """ - shape = heatmap.shape - heatmap = heatmap.unsqueeze(-1) - grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0) - value = (heatmap * grid).sum(dim=(2, 3, 4)) - kp = {'value': value} - - return kp - - def forward(self, x): - if self.scale_factor != 1: - x = self.down(x) - - feature_map = self.predictor(x) - prediction = self.kp(feature_map) - - final_shape = prediction.shape - heatmap = prediction.view(final_shape[0], final_shape[1], -1) - heatmap = F.softmax(heatmap / self.temperature, dim=2) - heatmap = heatmap.view(*final_shape) - - out = self.gaussian2kp(heatmap) - - if self.jacobian is not None: - jacobian_map = self.jacobian(feature_map) - jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 9, final_shape[2], - final_shape[3], final_shape[4]) - heatmap = heatmap.unsqueeze(2) - - jacobian = heatmap * jacobian_map - jacobian = jacobian.view(final_shape[0], final_shape[1], 9, -1) - jacobian = jacobian.sum(dim=-1) - jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 3, 3) - out['jacobian'] = jacobian - - return out - - -class HEEstimator(nn.Module): - """ - Estimating head pose and expression. 
- """ - - def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, num_bins=66, estimate_jacobian=True): - super(HEEstimator, self).__init__() - - self.conv1 = nn.Conv2d(in_channels=image_channel, out_channels=block_expansion, kernel_size=7, padding=3, stride=2) - self.norm1 = BatchNorm2d(block_expansion, affine=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.conv2 = nn.Conv2d(in_channels=block_expansion, out_channels=256, kernel_size=1) - self.norm2 = BatchNorm2d(256, affine=True) - - self.block1 = nn.Sequential() - for i in range(3): - self.block1.add_module('b1_'+ str(i), ResBottleneck(in_features=256, stride=1)) - - self.conv3 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1) - self.norm3 = BatchNorm2d(512, affine=True) - self.block2 = ResBottleneck(in_features=512, stride=2) - - self.block3 = nn.Sequential() - for i in range(3): - self.block3.add_module('b3_'+ str(i), ResBottleneck(in_features=512, stride=1)) - - self.conv4 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1) - self.norm4 = BatchNorm2d(1024, affine=True) - self.block4 = ResBottleneck(in_features=1024, stride=2) - - self.block5 = nn.Sequential() - for i in range(5): - self.block5.add_module('b5_'+ str(i), ResBottleneck(in_features=1024, stride=1)) - - self.conv5 = nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=1) - self.norm5 = BatchNorm2d(2048, affine=True) - self.block6 = ResBottleneck(in_features=2048, stride=2) - - self.block7 = nn.Sequential() - for i in range(2): - self.block7.add_module('b7_'+ str(i), ResBottleneck(in_features=2048, stride=1)) - - self.fc_roll = nn.Linear(2048, num_bins) - self.fc_pitch = nn.Linear(2048, num_bins) - self.fc_yaw = nn.Linear(2048, num_bins) - - self.fc_t = nn.Linear(2048, 3) - - self.fc_exp = nn.Linear(2048, 3*num_kp) - - def forward(self, x): - out = self.conv1(x) - out = self.norm1(out) - out = F.relu(out) - out = self.maxpool(out) - - out = self.conv2(out) - out = self.norm2(out) - out = F.relu(out) - - out = self.block1(out) - - out = self.conv3(out) - out = self.norm3(out) - out = F.relu(out) - out = self.block2(out) - - out = self.block3(out) - - out = self.conv4(out) - out = self.norm4(out) - out = F.relu(out) - out = self.block4(out) - - out = self.block5(out) - - out = self.conv5(out) - out = self.norm5(out) - out = F.relu(out) - out = self.block6(out) - - out = self.block7(out) - - out = F.adaptive_avg_pool2d(out, 1) - out = out.view(out.shape[0], -1) - - yaw = self.fc_roll(out) - pitch = self.fc_pitch(out) - roll = self.fc_yaw(out) - t = self.fc_t(out) - exp = self.fc_exp(out) - - return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} - diff --git a/spaces/diacanFperku/AutoGPT/Biblia Bilingva Romana Engleza Pdf 48.md b/spaces/diacanFperku/AutoGPT/Biblia Bilingva Romana Engleza Pdf 48.md deleted file mode 100644 index 5734dfea8196662473bb6b43f60d1f3b5ce8142f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Biblia Bilingva Romana Engleza Pdf 48.md +++ /dev/null @@ -1,14 +0,0 @@ -

    biblia bilingva romana engleza pdf 48


    Download ❤❤❤ https://gohhs.com/2uFTBG



    - -7z scris A-ROK .mp3 Închide şi descarca: - -2018-07-25T12:00:00-04:002018-07-25T16:00:00-04:00 DAYS ONLY! Celebrate the Fourth of July in style with the veterans of U.S. Armed Forces. Enjoy live music, a 4th of July movie on the big screen, food, a carnival, and more. For more info and tickets, visit of the United States of America, 4th of July Parade301 Jefferson St NW, Washington, District of Columbia 20001Capital Building, Washington DC - -Veterans of the United States of America, 4th of July Parade - -TWO DAYS ONLY! Celebrate the Fourth of July in style with the veterans of U.S. Armed Forces. Enjoy live music, a 4th of July movie on the big screen, food, a carnival, and more. For more info and tickets, visit - -1. Please note: the event will take place at the Capital Building, 101 Jefferson St. NW, Washington DC.2. For questions or for special accommodations, please call 678-301-4580 or email [email protected].3. A valid government-issued photo ID is required for admission.France is reportedly planning to increase the number of combat troops in Mali from 2,000 to 4,000 to help strengthen the country’s security forces. The number is expected to be used as a deterrent to al-Qaeda-linked fighters in the northern region and possibly to assist in the eventual capture of the central city of Gao, one of the last major cities held by the al-Qaeda-affiliated fighters, a French official said. The new French contingent, which could be deployed by the end of February, will not operate in the main cities of northern Mali 4fefd39f24
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Cracksagac30.md b/spaces/diacanFperku/AutoGPT/Cracksagac30.md deleted file mode 100644 index 0416ac49963604c076f25d9c4cc30767dd7af00f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Cracksagac30.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Cracksagac30


    DOWNLOAD ————— https://gohhs.com/2uFVEE



    - -SAT Chemistry Practice Test Nuclear Chemistry cracksat net. April 20th, 2019 - SAT chemistry ... Vox Ac30 Tb Manual · 1989 Jeep Cherokee Fuse Diagram. 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Dbs Walk And Follow Crack TOP.md b/spaces/diacanFperku/AutoGPT/Dbs Walk And Follow Crack TOP.md deleted file mode 100644 index 309cfcdbbf650380ab17b818e851f33f0e572454..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Dbs Walk And Follow Crack TOP.md +++ /dev/null @@ -1,7 +0,0 @@ - -

              While Goku and Gohan are gathering some water, Whis telepathically reads their minds, tells them about Chi-Chi's discovery, and requests their help. Goku and Gohan return to the group and, upon arrival, speak to Chi-Chi to learn more about the decoction. Chi-Chi tells them that this decoction is called "combo fruit", and Goku marvels that it can revive dead people. Before they can discuss things further, the two, along with Shin and Kibito, are attacked by Dabura, who kills Kibito. Goku, Gohan and Kibito are transported to the desert. Chiaotzu and Yamcha are then attacked by Menos, who stops them from transporting anyone. Vegeta arrives, sets aside his anger toward Dabura, and starts to follow Gohan and Chi-Chi's plan for a battle. In the desert, Goku examines the decoction and is even more amazed by its properties. Bulma arrives, and the three decide to take the decoction to Beerus.
          

    -

              Back at Babidi's ship, Beerus greets his father and asks about the decoction. Beerus urges Goku to drink it to revive his dead planet, while Vegeta claims that doing so will destroy the entire universe. Goku drinks the decoction and, as it takes effect, transforms into Raditz. He then battles all of Babidi's minions, using the decoction to turn their powers to his side so that he can beat the entire fleet of evil monsters. Once the transformation ends, Beerus comments that Goku is too powerful and sends him to a desert so he can lose all of his energy in the heat.
          

    -

    Dbs Walk And Follow Crack


    Download File ✸✸✸ https://gohhs.com/2uFUNW



    -

              While Goku is in the desert, Vegeta starts to realize that the decoction has turned him into a new version of Majin Vegeta. When Vegeta asks Babidi whether he also wants to absorb his energy, the fiend claims that he is not interested and quickly reverts Vegeta to his normal self. Back on the ship, Beerus tells Goku that he must lead the others to the source of the decoction to save them. Beerus and Whis then travel to a planet where they locate Goku and take him to the planet's cracked surface. The two enter the crack of time after witnessing a memory of Universe 6 being kicked out by Babidi and of Goku becoming the strongest by defeating the feared monsters of the universe. There they encounter a Kuririn who resembles Krillin.
          

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Gaussian 09 Torrent 1357.md b/spaces/diacanFperku/AutoGPT/Gaussian 09 Torrent 1357.md deleted file mode 100644 index 89e7e6e4e97cc02f37c9a65f07141138a04c62db..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Gaussian 09 Torrent 1357.md +++ /dev/null @@ -1,8 +0,0 @@ -

    Gaussian 09 torrent 1357


    Download File >>> https://gohhs.com/2uFTHs



    - -Application supports Windows, Linux and macOS. Linux platform is tested for Ubuntu 16.04, Ubuntu 14.04, Ubuntu 12.04 and Ubuntu 10.04. We recommend latest version of Linux. Download it for the best experience. gaussian,. gaussian noise, gaussian python Download it for the best experience. Gaussian 09 is a commercial software. It supports Windows, Linux and macOS. - -Get gaussian 9! You can get gaussian 9 license and you can get gaussian 9 product key.gaussian license, gaussian product key, gaussian vpn, gaussian internet, gaussian office, gaussian messenger, gaussian code license, gaussian code product key. gaussian 10, gaussian 10 license, gaussian 10 product key. Gaussian 10 has a limited function, so you can only use it with a gaussian 10 license. Download it for the best experience. You can get gaussian 10 product key.gaussian 10 license, gaussian 10 product key, gaussian 10 network, gaussian 10 messenger, gaussian 10 code license, gaussian 10 code product key. You can get gaussian 10 license and you can get gaussian 10 product key.gaussian 10, gaussian 10 license, gaussian 10 product key, gaussian 10 network, gaussian 10 messenger, gaussian 10 code license, gaussian 10 code product key. gaussian 11, gaussian 11 license, gaussian 11 product key. gaussian 11 download, gaussian 11 license, gaussian 11 product key, gaussian 11 code license, gaussian 11 code product key.gaussian 11 license, gaussian 11 product key, gaussian 11 network, gaussian 11 messenger, gaussian 11 code license, gaussian 11 code product key.gaussian 11 license, gaussian 11 product key, gaussian 11 network, gaussian 11 messenger, gaussian 11 code license, gaussian 11 code product key.gaussian 11 license, gaussian 11 product key, gaussian 11 network, gaussian 11 messenger, gaussian 11 code license, gaussian 11 code product key.gaussian 11 license, gaussian 11 product key, gaussian 11 network, gaussian 11 messenger, gaussian 11 code license, gaussian 11 code product key.gaussian 11 license, gaussian 11 product key, gaussian 11 network, gaussian 11 messenger, gaussian 11 code license, gaussian 11 code product key.gaussian 11 license, gaussian 11 product key, gaussian 11 network 4fefd39f24
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/New! Mixw 2.19 Crack Rapidshare [WORK].md b/spaces/diacanFperku/AutoGPT/New! Mixw 2.19 Crack Rapidshare [WORK].md deleted file mode 100644 index 705bf1148a89130ccf78241d6ee0d66fdfd983ed..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/New! Mixw 2.19 Crack Rapidshare [WORK].md +++ /dev/null @@ -1,13 +0,0 @@ -

    New! mixw 2.19 crack rapidshare


    DOWNLOAD ===== https://gohhs.com/2uFVA6



    -
    -DOWNLOAD: · 598d631155. Similar · New! mixw 2.19 Crack Rapidshare Lucio Dalla Caruso spartito for pianoforte.pdf Hindi Movie ... Download New -· Download · -Similar · New! darbouka. -DOWNLOAD: · 554c77b1ecb. -Similar · New! kyol. -DOWNLOAD: e8b2cd3a953. -Similar · New! -· Download 8a78ff9644
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Ocean Sea Alessandro Baricco Pdf ((INSTALL)) Download.md b/spaces/diacanFperku/AutoGPT/Ocean Sea Alessandro Baricco Pdf ((INSTALL)) Download.md deleted file mode 100644 index 7f18e2df3d7e151913b8c09d82be163c9cc9205a..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Ocean Sea Alessandro Baricco Pdf ((INSTALL)) Download.md +++ /dev/null @@ -1,33 +0,0 @@ -
    -

    Ocean Sea Alessandro Baricco Pdf Download: A Novel of Magic and Mystery

    -

    If you are looking for a novel that will transport you to a different world, you should try Ocean Sea Alessandro Baricco Pdf Download. This novel is a masterpiece of Italian literature, written by Alessandro Baricco, one of the most acclaimed and original writers of our time. Ocean Sea Alessandro Baricco Pdf Download is a novel that explores the themes of love, death, art, science and the sea in a poetic and captivating way.

    -

    Ocean Sea Alessandro Baricco Pdf Download


    Download ››› https://gohhs.com/2uFT89



    -

    What is Ocean Sea Alessandro Baricco Pdf Download About?

    -

    Ocean Sea Alessandro Baricco Pdf Download is a novel that tells the stories of several characters who converge in a small seaside hotel at the end of the 19th century. The hotel is called Almayer Inn, and it is located on the edge of the ocean, where the sea seems to have no end. The characters are:

    -
      -
    • A scientist who wants to measure the sea and find its end.
    • -
    • A painter who wants to capture the essence of the sea on his canvas.
    • -
    • A young girl who is dying of a mysterious illness and believes that the sea can cure her.
    • -
    • A woman who has been betrayed by her husband and seeks revenge.
    • -
    • A sailor who has a secret past and a mysterious mission.
    • -
    -

    The novel follows their lives, their dreams, their fears and their passions as they interact with each other and with the sea. The novel is full of surprises, twists and turns, and reveals the secrets and mysteries of the ocean and the human soul.

    -

    Why You Should Read Ocean Sea Alessandro Baricco Pdf Download

    -

    There are many reasons why you should read Ocean Sea Alessandro Baricco Pdf Download. Here are some of them:

    -

    -
      -
    • The novel is beautifully written, with a lyrical and poetic language that will enchant you.
    • -
    • The novel is rich in symbolism, metaphors and allegories that will make you think and reflect.
    • -
    • The novel is original and innovative, with a unique style and structure that will challenge you.
    • -
    • The novel is captivating and engaging, with a plot that will keep you hooked until the end.
    • -
    • The novel is inspiring and moving, with characters that will touch your heart and emotions.
    • -
    -

    How to Get Ocean Sea Alessandro Baricco Pdf Download

    -

    If you want to get Ocean Sea Alessandro Baricco Pdf Download, you have several options available. You can either buy it from a bookstore or an online retailer, or you can download it for free from various websites. However, you should be careful about the quality and legality of the sources you choose. Some of the sources may have low-quality or corrupted files or may violate the copyright laws. Therefore, you should always use trusted and reliable sources to get Ocean Sea Alessandro Baricco Pdf Download.

    -

    One of the best sources to get Ocean Sea Alessandro Baricco Pdf Download is Archive.org. This website offers free access to millions of books, movies, music and other media files that are in the public domain or have been donated by their owners. You can download Ocean Sea Alessandro Baricco Pdf Download from Archive.org in different formats such as PDF, EPUB or MOBI depending on your preference and device compatibility. You can also read Ocean Sea Alessandro Baricco Pdf Download online or offline without any hassle or risk.

    -

    Conclusion

    -

    Ocean Sea Alessandro Baricco Pdf Download is a novel that will take you on a magical and mysterious journey into the depths of the sea and the soul. It is a novel that will make you feel, think and wonder. It is a novel that you will not regret reading. You can get Ocean Sea Alessandro Baricco Pdf Download from Archive.org with ease and convenience.

    -

          

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Proprompter Software 3.3.0 Seria [TOP].md b/spaces/diacanFperku/AutoGPT/Proprompter Software 3.3.0 Seria [TOP].md deleted file mode 100644 index a4b6bfb8415117c4a1039849dce8cd566d991fb5..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Proprompter Software 3.3.0 Seria [TOP].md +++ /dev/null @@ -1,26 +0,0 @@ - -

    How to Use Proprompter Software 3.3.0 Seria for Professional Teleprompting

    -

    Proprompter Software 3.3.0 Seria is a cross-platform teleprompting software that can help you create and deliver smooth and flawless speeches, presentations, podcasts, and more. Whether you are a professional speaker, a broadcaster, a teacher, or a student, Proprompter Software 3.3.0 Seria can help you improve your communication skills and confidence.

    -

    In this article, we will show you how to use Proprompter Software 3.3.0 Seria for professional teleprompting, including how to install it, how to create and edit scripts, how to adjust settings, and how to control the scrolling speed and direction.

    -

    Proprompter Software 3.3.0 Seria


    DOWNLOAD »»» https://gohhs.com/2uFU0f



    -

    How to Install Proprompter Software 3.3.0 Seria

    -

              Proprompter Software 3.3.0 Seria is available for both Mac and Windows operating systems. You can download it from the official website or from other sources. To install it, you will need a serial number, which you can obtain from the developer or from online sources. Follow the on-screen instructions to complete the installation.
          

    -

    How to Create and Edit Scripts

    -

              Proprompter Software 3.3.0 Seria has a built-in word processor that allows you to create and edit scripts easily. You can also import scripts from other programs, such as Microsoft Word, or copy and paste text from other sources. You can use multiple fonts, sizes, colors, and styles in your script, and the editor also provides bookmarks, spell check, undo/redo, search/replace, and printing.
          

    -

    To create a new script, click on File > New or press Ctrl+N on your keyboard. To open an existing script, click on File > Open or press Ctrl+O on your keyboard. To save your script, click on File > Save or press Ctrl+S on your keyboard.

    -

    How to Adjust Settings

    -

    Proprompter Software 3.3.0 Seria allows you to customize various settings to suit your preferences and needs. You can access the settings by clicking on Edit > Preferences or pressing Ctrl+P on your keyboard. Some of the settings you can adjust are:

    -
      -
    • Screen size: You can change the size of the teleprompter window by dragging the edges or corners of the window or by entering a specific width and height in pixels.
    • -
    • Background color: You can change the background color of the teleprompter window by clicking on the color box and choosing a color from the palette or by entering a hexadecimal code.
    • -
    • Eyeline indicator: You can enable or disable an eyeline indicator that shows where you should look at the screen by checking or unchecking the box next to Eyeline Indicator.
    • -
    • Dual screen operation: You can enable or disable dual screen operation that allows you to use two monitors for teleprompting by checking or unchecking the box next to Dual Screen Operation.
    • -
    • Timer: You can enable or disable a timer that shows how much time has elapsed, how much time is remaining, or how much time you need to fit your script by checking or unchecking the box next to Timer.
    • -
    • Controls: You can customize the keyboard shortcuts for various functions, such as start/stop scrolling, pause/resume scrolling, increase/decrease speed, jump to bookmark, etc.
    • -
    -

    How to Control Scrolling Speed and Direction

    -

              Proprompter Software 3.3.0 Seria lets you control the scrolling speed and direction of your script in several ways, including keyboard commands, mouse movements, wireless remote controls, and foot pedals.
          
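              As a rough illustration of the arithmetic behind choosing a scrolling speed, the sketch below estimates how long a script takes to read aloud at a steady pace. It is purely hypothetical: ProPrompter does not expose such a function, and the 150 words-per-minute figure is an assumed average speaking rate, not a value taken from the software.
          
          ```python
          # Illustrative only: estimate read-aloud time for a teleprompter script.
          # The 150 wpm pace is an assumption; adjust it to the speaker's own rate.
          
          def estimated_read_time_seconds(script_text: str, words_per_minute: float = 150.0) -> float:
              """Return a rough estimate of how long the script takes to read aloud."""
              word_count = len(script_text.split())
              return word_count / words_per_minute * 60.0
          
          if __name__ == "__main__":
              sample = "Good evening and welcome to tonight's broadcast. " * 20
              print(f"{len(sample.split())} words ~ {estimated_read_time_seconds(sample):.0f} seconds at 150 wpm")
          ```
          
              An estimate like this is only a starting point; the on-screen timer and a rehearsal run remain the practical way to settle on the final speed.
          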

    -

    To start scrolling your script, press F5 on your keyboard or click on Prompt > Start Prompting on the menu bar. To stop scrolling your script, press F5 again or click on Prompt > Stop Prompting on the menu bar.

    -

    To pause/resume scrolling your script, press F6 on your keyboard or click on Prompt > Pause/Resume Prompting on the menu bar.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/dietician/rewriteData/app.py b/spaces/dietician/rewriteData/app.py deleted file mode 100644 index 591142a36cf99bf4decb9dbe23f959c9e00a0141..0000000000000000000000000000000000000000 --- a/spaces/dietician/rewriteData/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import gradio as gr -import requests - -def rewriteTheData(data): - - api_key = "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"; - api_key = "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"; - api_key = "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"; - - # json_input = json.loads(input); - URL = "https://api.openai.com/v1/chat/completions" - - payload = { - "model": "gpt-3.5-turbo", - "messages": [{"role": "user", "content": data}, - {"role": "system", "content": "input is in the json formate, just rewrite the text and keep the meaning same. respond with json sormate data. don't add additional information like this is generated by AI"}], - "temperature" : 0.0, - "top_p":1.0, - "n" : 1, - "stream": False, - "presence_penalty":0, - "frequency_penalty":0, - } - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}" - } - - response = requests.post(URL, headers=headers, json=payload, stream=False) - - json_data = json.loads(response.text) - - print(json_data) - - return json_data["choices"][0]['message']["content"] - - -def greet(data): - return rewriteTheData(data) - -demo = gr.Interface(fn=greet, inputs="text", outputs="text") - -demo.launch() \ No newline at end of file diff --git a/spaces/digitalxingtong/Azusa-Bert-VITS2/models.py b/spaces/digitalxingtong/Azusa-Bert-VITS2/models.py deleted file mode 100644 index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Azusa-Bert-VITS2/models.py +++ /dev/null @@ -1,707 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from commons import init_weights, get_padding -from text import symbols, num_tones, num_languages -class DurationDiscriminator(nn.Module): #vits2 - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential( - nn.Linear(filter_channels, 1), - nn.Sigmoid() - ) - - def forward_probability(self, x, 
x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_1(x) - x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_2(x) - x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - -class TransformerCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - share_parameter=False - ): - - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - - self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None - - for i in range(n_flows): - self.flows.append( - modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = 
nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(len(symbols), hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.tone_emb = nn.Embedding(num_tones, hidden_channels) - nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5) - self.language_emb = nn.Embedding(num_languages, hidden_channels) - nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5) - self.bert_proj = nn.Conv1d(1024, hidden_channels, 1) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, tone, language, bert, g=None): - x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, 
kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - 
x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - -class ReferenceEncoder(nn.Module): - ''' - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, ref_enc_gru_size] - ''' - - def __init__(self, spec_channels, gin_channels=0): - - super().__init__() - self.spec_channels = spec_channels - ref_enc_filters = [32, 32, 64, 64, 128, 128] - K = len(ref_enc_filters) - filters = [1] + ref_enc_filters - convs = [weight_norm(nn.Conv2d(in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1))) for i in range(K)] - self.convs = nn.ModuleList(convs) - # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) - - out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) - self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels, - hidden_size=256 // 2, - batch_first=True) - self.proj = nn.Linear(128, gin_channels) - - def forward(self, inputs, mask=None): - N = inputs.size(0) - out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] - for conv in self.convs: - out = conv(out) - # out = wn(out) - out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, 128] - - return self.proj(out.squeeze(0)) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - 
resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=256, - gin_channels=256, - use_sdp=True, - n_flow_layer = 4, - n_layers_trans_flow = 3, - flow_share_parameter = False, - use_transformer_flow = True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.n_layers_trans_flow = n_layers_trans_flow - self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True) - self.use_sdp = use_sdp - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - if use_transformer_flow: - self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter) - else: - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels) - self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers >= 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - else: - self.ref_enc = ReferenceEncoder(spec_channels, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = 
torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - if self.use_noise_scaled_mas: - epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - - l_length_sdp = self.sdp(x, x_mask, w, g=g) - l_length_sdp = l_length_sdp / torch.sum(x_mask) - - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - l_length = l_length_dp + l_length_sdp - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_) - - def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None): - #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert) - # g = self.gst(y) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) diff --git a/spaces/dineshreddy/WALT/cwalt/CWALT.py b/spaces/dineshreddy/WALT/cwalt/CWALT.py deleted file mode 100644 index 894578c1c75766cf27999dbb1fe64a4c4dcf4efb..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/cwalt/CWALT.py +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Oct 19 19:14:47 2021 - -@author: dinesh -""" -import glob -from .utils import bb_intersection_over_union_unoccluded -import numpy as np -from PIL import Image -import datetime -import cv2 -import os -from tqdm import tqdm - - -def get_image(time, folder): - for week_loop in range(5): - try: - image = np.array(Image.open(folder+'/week' +str(week_loop)+'/'+ str(time).replace(' ','T').replace(':','-').split('+')[0] + '.jpg')) - break - except: - continue - if image is None: - print('file not found') - return image - -def get_mask(segm, image): - poly = np.array(segm).reshape((int(len(segm)/2), 2)) - mask = image.copy()*0 - 
cv2.fillConvexPoly(mask, poly, (255, 255, 255)) - return mask - -def get_unoccluded(indices, tracks_all): - unoccluded_indexes = [] - unoccluded_index_all =[] - while 1: - unoccluded_clusters = [] - len_unocc = len(unoccluded_indexes) - for ind in indices: - if ind in unoccluded_indexes: - continue - occ = False - for ind_compare in indices: - if ind_compare in unoccluded_indexes: - continue - if bb_intersection_over_union_unoccluded(tracks_all[ind], tracks_all[ind_compare]) > 0.01 and ind_compare != ind: - occ = True - if occ==False: - unoccluded_indexes.extend([ind]) - unoccluded_clusters.extend([ind]) - if len(unoccluded_indexes) == len_unocc and len_unocc != 0: - for ind in indices: - if ind not in unoccluded_indexes: - unoccluded_indexes.extend([ind]) - unoccluded_clusters.extend([ind]) - - unoccluded_index_all.append(unoccluded_clusters) - if len(unoccluded_indexes) > len(indices)-5: - break - return unoccluded_index_all - -def primes(n): # simple sieve of multiples - odds = range(3, n+1, 2) - sieve = set(sum([list(range(q*q, n+1, q+q)) for q in odds], [])) - return [2] + [p for p in odds if p not in sieve] - -def save_image(image_read, save_path, data, path): - tracks = data['tracks_all_unoccluded'] - segmentations = data['segmentation_all_unoccluded'] - timestamps = data['timestamps_final_unoccluded'] - - image = image_read.copy() - indices = np.random.randint(len(tracks),size=30) - prime_numbers = primes(1000) - unoccluded_index_all = get_unoccluded(indices, tracks) - - mask_stacked = image*0 - mask_stacked_all =[] - count = 0 - time = datetime.datetime.now() - - for l in indices: - try: - image_crop = get_image(timestamps[l], path) - except: - continue - try: - bb_left, bb_top, bb_width, bb_height, confidence = tracks[l] - except: - bb_left, bb_top, bb_width, bb_height, confidence, track_id = tracks[l] - mask = get_mask(segmentations[l], image) - - image[mask > 0] = image_crop[mask > 0] - mask[mask > 0] = 1 - for count, mask_inc in enumerate(mask_stacked_all): - mask_stacked_all[count][cv2.bitwise_and(mask, mask_inc) > 0] = 2 - mask_stacked_all.append(mask) - mask_stacked += mask - count = count+1 - - cv2.imwrite(save_path + '/images/'+str(time).replace(' ','T').replace(':','-').split('+')[0] + '.jpg', image[:, :, ::-1]) - cv2.imwrite(save_path + '/Segmentation/'+str(time).replace(' ','T').replace(':','-').split('+')[0] + '.jpg', mask_stacked[:, :, ::-1]*30) - np.savez_compressed(save_path+'/Segmentation/'+str(time).replace(' ','T').replace(':','-').split('+')[0], mask=mask_stacked_all) - -def CWALT_Generation(camera_name): - save_path_train = 'data/cwalt_train' - save_path_test = 'data/cwalt_test' - - json_file_path = 'data/{}/{}.json'.format(camera_name,camera_name) # iii1/iii1_7_test.json' # './data.json' - path = 'data/' + camera_name - - data = np.load(json_file_path + '.npz', allow_pickle=True) - - ## slip data - - data_train=dict() - data_test=dict() - - split_index = int(len(data['timestamps_final_unoccluded'])*0.8) - - data_train['tracks_all_unoccluded'] = data['tracks_all_unoccluded'][0:split_index] - data_train['segmentation_all_unoccluded'] = data['segmentation_all_unoccluded'][0:split_index] - data_train['timestamps_final_unoccluded'] = data['timestamps_final_unoccluded'][0:split_index] - - data_test['tracks_all_unoccluded'] = data['tracks_all_unoccluded'][split_index:] - data_test['segmentation_all_unoccluded'] = data['segmentation_all_unoccluded'][split_index:] - data_test['timestamps_final_unoccluded'] = data['timestamps_final_unoccluded'][split_index:] - - 
image_read = np.array(Image.open(path + '/T18-median_image.jpg')) - image_read = cv2.resize(image_read, (int(image_read.shape[1]/2), int(image_read.shape[0]/2))) - - try: - os.mkdir(save_path_train) - except: - print(save_path_train) - - try: - os.mkdir(save_path_train + '/images') - os.mkdir(save_path_train + '/Segmentation') - except: - print(save_path_train+ '/images') - - try: - os.mkdir(save_path_test) - except: - print(save_path_test) - - try: - os.mkdir(save_path_test + '/images') - os.mkdir(save_path_test + '/Segmentation') - except: - print(save_path_test+ '/images') - - for loop in tqdm(range(3000), desc="Generating training CWALT Images "): - save_image(image_read, save_path_train, data_train, path) - - for loop in tqdm(range(300), desc="Generating testing CWALT Images "): - save_image(image_read, save_path_test, data_test, path) - diff --git a/spaces/dorkai/ChatUIPro/README.md b/spaces/dorkai/ChatUIPro/README.md deleted file mode 100644 index 8300f3673c81da328054ca210ec9c4139f1ce369..0000000000000000000000000000000000000000 --- a/spaces/dorkai/ChatUIPro/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: ChatUIPro -emoji: 🐨 -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dorkai/SINGPT-Temporary/README.md b/spaces/dorkai/SINGPT-Temporary/README.md deleted file mode 100644 index 93354f8e9a0b2b975ea73e53de26122313650c5d..0000000000000000000000000000000000000000 --- a/spaces/dorkai/SINGPT-Temporary/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Text Generation Webui Space -emoji: 🏃 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.20.1 -app_file: run.py -pinned: false -license: mit -duplicated_from: MrD05/text-generation-webui-space ---- - -Check out this repo https://github.com/oobabooga/text-generation-webui diff --git a/spaces/dstackai/dstack-template/README.md b/spaces/dstackai/dstack-template/README.md deleted file mode 100644 index 253253da9bd4e911941f931a74e5c4319634fa36..0000000000000000000000000000000000000000 --- a/spaces/dstackai/dstack-template/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: dstack -emoji: 🧬 -colorFrom: purple -colorTo: cyan -sdk: docker -pinned: false -app_port: 3000 -license: mpl-2.0 ---- diff --git a/spaces/dukai289/scripts/style.css b/spaces/dukai289/scripts/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/dukai289/scripts/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/edenehuyh/BLIQ_ImageCaptioning/app.py b/spaces/edenehuyh/BLIQ_ImageCaptioning/app.py deleted file mode 100644 index 981514839def8a1ffb63ee3c983d7a496a6193ea..0000000000000000000000000000000000000000 --- a/spaces/edenehuyh/BLIQ_ImageCaptioning/app.py +++ /dev/null @@ -1,39 +0,0 @@ -# Import libraries -import torch -import gradio as gr - -from models.blip import blip_decoder -from torchvision import transforms -from torchvision.transforms.functional import InterpolationMode - - -# Download 
model -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -image_size = 384 -transform = transforms.Compose([ - transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC), - transforms.ToTensor(), - transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) - ]) - -model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth' - -model = blip_decoder(pretrained=model_url, image_size=384, vit='large') -model.eval() -model = model.to(device) - - -# Deploy -title = 'Image Captioning' -description = 'Huỳnh Công Chánh' -inputs = gr.inputs.Image(type='pil') -outputs = gr.outputs.Textbox(label='Output') - -def inference(raw_image): - image = transform(raw_image).unsqueeze(0).to(device) - with torch.no_grad(): - caption = model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5) - return caption[0] - -gr.Interface(inference, inputs, outputs, title=title, description=description).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/enzostvs/hair-colour/components/form/progress.tsx b/spaces/enzostvs/hair-colour/components/form/progress.tsx deleted file mode 100644 index 13b82e13b4e7a3a9b04de19cb8dd31563036c92c..0000000000000000000000000000000000000000 --- a/spaces/enzostvs/hair-colour/components/form/progress.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import { ResultsInterface } from "./results"; - -export const Progress = ({ result }: { result: ResultsInterface }) => { - return ( -
    -
    -

    {result.label}

    -

    - {(result.score * 100).toFixed(3)}% -

    -
    -
    -
    -
    -
    - ); -}; diff --git a/spaces/epexVfeibi/Imagedeblurr/Adobe Photoshop CS6 Extended Crack .DLL Files 32bit 64bit Free [TOP] Download.md b/spaces/epexVfeibi/Imagedeblurr/Adobe Photoshop CS6 Extended Crack .DLL Files 32bit 64bit Free [TOP] Download.md deleted file mode 100644 index 2bd350078a12ff76fb3d8cc5192c6e553cefb10b..0000000000000000000000000000000000000000 --- a/spaces/epexVfeibi/Imagedeblurr/Adobe Photoshop CS6 Extended Crack .DLL Files 32bit 64bit Free [TOP] Download.md +++ /dev/null @@ -1,12 +0,0 @@ -
    -

If you are looking for powerful and versatile music production software, you might want to check out Cakewalk SONAR Producer Edition 8.3.1.372. This program offers a comprehensive set of tools and features for creating, editing, mixing and mastering professional-quality audio tracks. It supports both 32-bit and 64-bit operating systems, so you can enjoy the full potential of your hardware. You can download Cakewalk SONAR Producer Edition 8.3.1.372 from various online sources, such as hotfile, torrent or megaupload. These sites provide fast and reliable downloads, as well as free access to the software. However, please note that downloading software from these sites may be illegal or unsafe, so proceed at your own risk.

    -

    Adobe Photoshop CS6 Extended Crack .DLL Files 32bit 64bit free download


    Download Filehttps://jinyurl.com/2uEnnf



    - -

    Cakewalk SONAR Producer Edition 8.3.1.372 is designed for professional musicians, producers and engineers who need a complete solution for their audio projects. It allows you to record, edit and mix audio tracks with high-quality effects and instruments. You can also use it to create MIDI sequences, loops and beats, as well as to compose and arrange music with virtual instruments and synthesizers. It supports a wide range of audio formats, such as WAV, MP3, WMA, OGG, FLAC and more.

    - -

    One of the main advantages of Cakewalk SONAR Producer Edition 8.3.1.372 is its compatibility with both 32-bit and 64-bit operating systems. This means that you can use more memory and processing power for your audio projects, resulting in better performance and stability. You can also work with larger and more complex projects without worrying about crashes or slowdowns. Moreover, you can use both 32-bit and 64-bit plug-ins in the same project, giving you more flexibility and options for your sound design.

    - -

    If you want to try Cakewalk SONAR Producer Edition 8.3.1.372 for yourself, you can download it from various online sources, such as hotfile, torrent or megaupload. These sites offer fast and easy downloads of the software, as well as free access to it. However, you should be aware that downloading software from these sites may be illegal or unsafe, as they may contain viruses, malware or spyware that could harm your computer or compromise your personal information. Therefore, you should always scan the downloaded files with a reliable antivirus program before installing them. Alternatively, you can buy the software from the official website of Cakewalk or from authorized dealers.

    -

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/evaluate-comparison/mcnemar/README.md b/spaces/evaluate-comparison/mcnemar/README.md deleted file mode 100644 index 1ceaaee211a3abb180cd0f4e3eb82459523c8a75..0000000000000000000000000000000000000000 --- a/spaces/evaluate-comparison/mcnemar/README.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: McNemar -emoji: 🤗 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.0.2 -app_file: app.py -pinned: false -tags: -- evaluate -- comparison -description: >- - McNemar's test is a diagnostic test over a contingency table resulting from the predictions of two classifiers. The test compares the sensitivity and specificity of the diagnostic tests on the same group reference labels. It can be computed with: - McNemar = (SE - SP)**2 / SE + SP - Where: - SE: Sensitivity (Test 1 positive; Test 2 negative) - SP: Specificity (Test 1 negative; Test 2 positive) ---- - - -# Comparison Card for McNemar - -## Comparison description - -McNemar's test is a non-parametric diagnostic test over a contingency table resulting from the predictions of two classifiers. The test compares the sensitivity and specificity of the diagnostic tests on the same group reference labels. It can be computed with: - -McNemar = (SE - SP)**2 / SE + SP - -Where: -* SE: Sensitivity (Test 1 positive; Test 2 negative) -* SP: Specificity (Test 1 negative; Test 2 positive) - -In other words, SE and SP are the diagonal elements of the contingency table for the classifier predictions (`predictions1` and `predictions2`) with respect to the ground truth `references`. - -## How to use - -The McNemar comparison calculates the proportions of responses that exhibit disagreement between two classifiers. It is used to analyze paired nominal data. - -## Inputs - -Its arguments are: - -`predictions1`: a list of predictions from the first model. - -`predictions2`: a list of predictions from the second model. - -`references`: a list of the ground truth reference labels. - -## Output values - -The McNemar comparison outputs two things: - -`stat`: The McNemar statistic. - -`p`: The p value. - -## Examples - -Example comparison: - -```python -mcnemar = evaluate.load("mcnemar") -results = mcnemar.compute(references=[1, 0, 1], predictions1=[1, 1, 1], predictions2=[1, 0, 1]) -print(results) -{'stat': 1.0, 'p': 0.31731050786291115} -``` - -## Limitations and bias - -The McNemar test is a non-parametric test, so it has relatively few assumptions (basically only that the observations are independent). It should be used to analyze paired nominal data only. 
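For readers who want to verify the numbers by hand, the sketch below reproduces the contingency counts and the statistic for the example in this card. It assumes `scipy` is installed; the `mcnemar` comparison module itself remains the reference implementation, and the denominator of the statistic is the whole sum SE + SP.

```python
# Hand-computed sketch of the example in this card (assumes scipy is available).
from scipy.stats import chi2

references   = [1, 0, 1]
predictions1 = [1, 1, 1]
predictions2 = [1, 0, 1]

# SE: model 1 correct while model 2 is wrong; SP: model 2 correct while model 1 is wrong.
se = sum(p1 == r and p2 != r for p1, p2, r in zip(predictions1, predictions2, references))
sp = sum(p2 == r and p1 != r for p1, p2, r in zip(predictions1, predictions2, references))

stat = (se - sp) ** 2 / (se + sp)   # McNemar statistic
p = chi2.sf(stat, df=1)             # p value from a chi-square distribution with 1 degree of freedom

print({"stat": stat, "p": p})       # {'stat': 1.0, 'p': 0.317...}
```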
- -## Citations - -```bibtex -@article{mcnemar1947note, - title={Note on the sampling error of the difference between correlated proportions or percentages}, - author={McNemar, Quinn}, - journal={Psychometrika}, - volume={12}, - number={2}, - pages={153--157}, - year={1947}, - publisher={Springer-Verlag} -} -``` diff --git a/spaces/faizhalas/coconut/Home.py b/spaces/faizhalas/coconut/Home.py deleted file mode 100644 index de1ab673c5073f37975c676c5132d611d9c81b08..0000000000000000000000000000000000000000 --- a/spaces/faizhalas/coconut/Home.py +++ /dev/null @@ -1,232 +0,0 @@ -#import module -import streamlit as st -from PIL import Image - -#===config=== -st.set_page_config( - page_title="Coconut", - page_icon="🥥", - layout="wide" -) -st.title('🥥 Coconut Library Tool') -hide_streamlit_style = """ - - """ -st.markdown(hide_streamlit_style, unsafe_allow_html=True) - -st.sidebar.success('Select page above') - -#===page=== -mt1, mt2, mt3 = st.tabs(["About", "How to", "Behind this app"]) - -with mt1: - st.header("Hello and welcome to the Coconut Library Tool!") - st.write("The coconut tree is known as one of the most useful trees. Each part of this important tree has an integral function from the leaves producing oxygen through photosynthesis to the shells, oil, wood, flowers, and husks being used in a variety of ways, such as building houses, cooking, and more.") - st.write("Our philosophy aspires to emulate this highly cohesive and functionally unified environment where each part serves a specific function to the greater whole. 🌴 Just like the coconut tree, the Coconut Library Tool is the all-in-one data mining and textual analysis tool for librarians or anyone interested in these applications. Our tool does not require any prior knowledge of coding or programming, making it approachable and great for users who want to test out these data analysis and visualization techniques.") - st.write("We cannot thank everyone enough for who has assisted in the creation of this tool. Due to each individual’s efforts, science will advance, allowing for multiple analysis and visualization techniques to coexist within this one tool. 🧑🏻‍🤝‍🧑🏾") - st.text('') - st.divider() - st.text('We support Scopus, Web of Science, Lens, as well as personalized CSV files. Further information can be found in the "How to" section.') - st.text('') - st.divider() - st.write('To cite the Coconut Library Tool, please use the following reference:') - st.info(" Santosa, Faizhal Arif, George, Crissandra J., & Lamba, Manika. (2023). Coconut Library Tool (1.0.0). Zenodo. https://doi.org/10.5281/zenodo.8323458", icon="✍️") - -with mt2: - st.header("Before you start") - option = st.selectbox( - 'Please choose....', - ('Keyword Stem', 'Topic Modeling', 'Bidirected Network', 'Sunburst')) - - if option == 'Keyword Stem': - tab1, tab2, tab3, tab4 = st.tabs(["Prologue", "Steps", "Requirements", "Download Result"]) - with tab1: - st.write("This approach is effective for locating basic words and aids in catching the true meaning of the word, which can lead to improved semantic analysis and comprehension of the text. Some people find it difficult to check keywords before performing bibliometrics (using software such as VOSviewer and Bibliometrix). This strategy makes it easy to combine and search for fundamental words from keywords, especially if you have a large number of keywords. 
To do stemming or lemmatization on other text, change the column name to 'Keyword' in your file.") - st.divider() - st.write('💡 The idea came from this:') - st.write('Santosa, F. A. (2022). Prior steps into knowledge mapping: Text mining application and comparison. Issues in Science and Technology Librarianship, 102. https://doi.org/10.29173/istl2736') - - with tab2: - st.text("1. Put your file.") - st.text("2. Choose your preferable method. Picture below may help you to choose wisely.") - st.markdown("![Source: https://studymachinelearning.com/stemming-and-lemmatization/](https://studymachinelearning.com/wp-content/uploads/2019/09/stemmin_lemm_ex-1.png)") - st.text('Source: https://studymachinelearning.com/stemming-and-lemmatization/') - st.text("3. Now you need to select what kind of keywords you need.") - st.text("4. Finally, you can download and use the file on VOSviewer, Bibliometrix, or put it on OpenRefine to get better result!") - st.error("Please check what has changed. It's possible some keywords failed to find their roots.", icon="🚨") - - with tab3: - st.text(""" - +----------------+------------------------+---------------------------------+ - | Source | File Type | Column | - +----------------+------------------------+---------------------------------+ - | Scopus | Comma-separated values | Author Keywords | - | | (.csv) | Index Keywords | - +----------------+------------------------+---------------------------------+ - | Web of Science | Tab delimited file | Author Keywords | - | | (.txt) | Keywords Plus | - +----------------+------------------------+---------------------------------+ - | Lens.org | Comma-separated values | Keywords (Scholarly Works) | - | | (.csv) | | - +----------------+------------------------+---------------------------------+ - | Other | .csv | Change your column to 'Keyword' | - +----------------+------------------------+---------------------------------+ - """) - - with tab4: - st.subheader(':blue[Result]') - st.button('Press to download result 👈') - st.text("Go to Result and click Download button.") - - st.divider() - st.subheader(':blue[List of Keywords]') - st.button('Press to download keywords 👈') - st.text("Go to List of Keywords and click Download button.") - - elif option == 'Topic Modeling': - tab1, tab2, tab3, tab4 = st.tabs(["Prologue", "Steps", "Requirements", "Download Visualization"]) - with tab1: - st.write("Topic modeling has numerous advantages for librarians in different aspects of their work. A crucial benefit is an ability to quickly organize and categorize a huge volume of textual content found in websites, institutional archives, databases, emails, and reference desk questions. Librarians can use topic modeling approaches to automatically identify the primary themes or topics within these documents, making navigating and retrieving relevant information easier. Librarians can identify and understand the prevailing topics of discussion by analyzing text data with topic modeling tools, allowing them to assess user feedback, tailor their services to meet specific needs and make informed decisions about collection development and resource allocation. Making ontologies, automatic subject classification, recommendation services, bibliometrics, altmetrics, and better resource searching and retrieval are a few examples of topic modeling. To do topic modeling on other text like chats and surveys, change the column name to 'Abstract' in your file.") - st.divider() - st.write('💡 The idea came from this:') - st.write('Lamba, M., & Madhusudhan, M. 
(2021, July 31). Topic Modeling. Text Mining for Information Professionals, 105–137. https://doi.org/10.1007/978-3-030-85085-2_4') - - with tab2: - st.text("1. Put your file. We use abstract column for this process.") - st.text("2. Choose your preferred method. LDA is the most widely used, whereas Biterm is appropriate for short text, and BERTopic works well for large text data as well as supports more than 50+ languages.") - st.text("3. Finally, you can visualize your data.") - st.error("This app includes lemmatization and stopwords for the abstract text. Currently, we only offer English words.", icon="💬") - st.error("If you want to see the topic on another data (chats, questionnaire, or other text), change the column name of your table to 'Abstract'.", icon="🚨") - - with tab3: - st.text(""" - +----------------+------------------------+----------------------------------+ - | Source | File Type | Column | - +----------------+------------------------+----------------------------------+ - | Scopus | Comma-separated values | Abstract | - | | (.csv) | | - +----------------+------------------------+----------------------------------+ - | Web of Science | Tab delimited file | Abstract | - | | (.txt) | | - +----------------+------------------------+----------------------------------+ - | Lens.org | Comma-separated values | Abstract (Scholarly Works) | - | | (.csv) | | - +----------------+------------------------+----------------------------------+ - | Other | .csv | Change your column to 'Abstract' | - +----------------+------------------------+----------------------------------+ - """) - - with tab4: - st.subheader(':blue[pyLDA]') - st.button('Download image') - st.text("Click Download Image button.") - - st.divider() - st.subheader(':blue[Biterm]') - st.text("Click the three dots at the top right then select the desired format.") - st.markdown("![Downloading visualization](https://raw.githubusercontent.com/faizhalas/library-tools/main/images/download_biterm.jpg)") - - st.divider() - st.subheader(':blue[BERTopic]') - st.text("Click the camera icon on the top right menu") - st.markdown("![Downloading visualization](https://raw.githubusercontent.com/faizhalas/library-tools/main/images/download_bertopic.jpg)") - - elif option == 'Bidirected Network': - tab1, tab2, tab3, tab4 = st.tabs(["Prologue", "Steps", "Requirements", "Download Graph"]) - with tab1: - st.write("The use of network text analysis by librarians can be quite beneficial. Finding hidden correlations and connections in textual material is a significant advantage. Using network text analysis tools, librarians can improve knowledge discovery, obtain deeper insights, and support scholars meaningfully, ultimately enhancing the library's services and resources. This menu provides a two-way relationship instead of the general network of relationships to enhance the co-word analysis. Since it is based on ARM, you may obtain transactional data information using this menu. Please name the column in your file 'Keyword' instead.") - st.divider() - st.write('💡 The idea came from this:') - st.write('Santosa, F. A. (2023). Adding Perspective to the Bibliometric Mapping Using Bidirected Graph. Open Information Science, 7(1), 20220152. https://doi.org/10.1515/opis-2022-0152') - - with tab2: - st.text("1. Put your file.") - st.text("2. Choose your preferable method. 
Picture below may help you to choose wisely.") - st.markdown("![Source: https://studymachinelearning.com/stemming-and-lemmatization/](https://studymachinelearning.com/wp-content/uploads/2019/09/stemmin_lemm_ex-1.png)") - st.text('Source: https://studymachinelearning.com/stemming-and-lemmatization/') - st.text("3. Choose the value of Support and Confidence. If you're not sure how to use it please read the article above or just try it!") - st.text("4. You can see the table and a simple visualization before making a network visualization.") - st.text('5. Click "Generate network visualization" to see the network') - st.error("The more data on your table, the more you'll see on network.", icon="🚨") - st.error("If the table contains many rows, the network will take more time to process. Please use it efficiently.", icon="⌛") - - with tab3: - st.text(""" - +----------------+------------------------+---------------------------------+ - | Source | File Type | Column | - +----------------+------------------------+---------------------------------+ - | Scopus | Comma-separated values | Author Keywords | - | | (.csv) | Index Keywords | - +----------------+------------------------+---------------------------------+ - | Web of Science | Tab delimited file | Author Keywords | - | | (.txt) | Keywords Plus | - +----------------+------------------------+---------------------------------+ - | Lens.org | Comma-separated values | Keywords (Scholarly Works) | - | | (.csv) | | - +----------------+------------------------+---------------------------------+ - | Other | .csv | Change your column to 'Keyword' | - | | | and separate the words with ';' | - +----------------+------------------------+---------------------------------+ - """) - - with tab4: - st.subheader(':blue[Bidirected Network]') - st.text("Zoom in, zoom out, or shift the nodes as desired, then right-click and select Save image as ...") - st.markdown("![Downloading graph](https://raw.githubusercontent.com/faizhalas/library-tools/main/images/download_bidirected.jpg)") - - - elif option == 'Sunburst': - tab1, tab2, tab3, tab4 = st.tabs(["Prologue", "Steps", "Requirements", "Download Visualization"]) - with tab1: - st.write("Sunburst's ability to present a thorough and intuitive picture of complex hierarchical data is an essential benefit. Librarians can easily browse and grasp the relationships between different levels of the hierarchy by employing sunburst visualizations. Sunburst visualizations can also be interactive, letting librarians and users drill down into certain categories or subcategories for further information. This interactive and visually appealing depiction improves the librarian's understanding of the collection and provides users with an engaging and user-friendly experience, resulting in improved information retrieval and decision-making.") - - with tab2: - st.text("1. Put your Scopus CSV file.") - st.text("2. You can set the range of years to see how it changed.") - st.text("3. The sunburst has 3 levels. The inner circle is the type of data, meanwhile, the middle is the source title and the outer is the year the article was published.") - st.text("4. The size of the slice depends on total documents. 
The average of inner and middle levels is calculated by formula below:") - st.code('avg = sum(a * weights) / sum(weights)', language='python') - - with tab3: - st.text(""" - +----------------+------------------------+--------------------+ - | Source | File Type | Column | - +----------------+------------------------+--------------------+ - | Scopus | Comma-separated values | Source title, | - | | (.csv) | Document Type, | - +----------------+------------------------| Cited by, Year | - | Web of Science | Tab delimited file | | - | | (.txt) | | - +----------------+------------------------+--------------------+ - | Lens.org | Comma-separated values | Publication Year, | - | | (.csv) | Publication Type, | - | | | Source Title, | - | | | Citing Works Count | - +----------------+------------------------+--------------------+ - """) - - with tab4: - st.subheader(':blue[Sunburst]') - st.text("Click the camera icon on the top right menu") - st.markdown("![Downloading visualization](https://raw.githubusercontent.com/faizhalas/library-tools/main/images/download_bertopic.jpg)") - - -with mt3: - st.header('Behind this app') - st.subheader('Faizhal Arif Santosa') - st.text('Academic Librarian. Polytechnic Institute of Nuclear Technology, National Research and Innovation Agency.') - st.text('') - st.subheader('Crissandra George') - st.text('Digital Collections Manager Librarian. Case Western Reserve University.') - st.text('') - st.divider() - st.header('Advisor') - st.subheader('Dr. Manika Lamba') - st.text('Postdoctoral Research Associate. University of Illinois Urbana-Champaign.') - st.text('') - st.text('') - st.divider() - st.text('If you want to take a part, please let us know!') \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Beyonce 4 Zip Mediafire.md b/spaces/falterWliame/Face_Mask_Detection/Beyonce 4 Zip Mediafire.md deleted file mode 100644 index 686e46a545f242ad1d2679ff41250c0f7a05a1d1..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Beyonce 4 Zip Mediafire.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Beyonce 4 Zip Mediafire


    Download Zip ☆☆☆☆☆ https://urlca.com/2uDcQO



    - -July 8, 2012 - Beyoncé (album) [2013] Track list: 1. Pretty Hurts 2. Haunted 3. Drunk In Love (Feat. Jay-Z) 4. Blow 5. No Angel 6. Partition 7. Jealous (Feat. JAY-Z) 8. Steven (Feat. Eminem) 9. Lay It On Me (Feat. Beyoncé) 10. Say My Name 11. Own It (Feat. Akon) 12. Drunk In Love ( Acoustic Version) 13. Drunk In Love (Radio Edit) 14. Partition (The Intro Edit) 15. Drunk In Love (Instrumental) 16. Partition (Album Mix) 17. Drunk In Love (Extended Mix) 18. Partition (DJ Quik) Remix) 19. Partition (Pete Tong Remix) 20. Partition (Travis Garland Remix) 21. Partition (Sugar Hill Remix) 22. Partition (Album Mix) 23. Partition (Album Remix) 14. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/HD Online Player (KANAVU.MALAYALAM.B.GRADE.MOVIE.(MALL).md b/spaces/falterWliame/Face_Mask_Detection/HD Online Player (KANAVU.MALAYALAM.B.GRADE.MOVIE.(MALL).md deleted file mode 100644 index 47e8610b32b4fad3ecc3776e3ecee6d73939fbc3..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/HD Online Player (KANAVU.MALAYALAM.B.GRADE.MOVIE.(MALL).md +++ /dev/null @@ -1,6 +0,0 @@ -

    HD Online Player (KANAVU.MALAYALAM.B.GRADE.MOVIE.(MALL)


    Download Ziphttps://urlca.com/2uDdMc



    - -... 1 http://k.filed.site/Kanavu-malayalam-movie-hot-scenes.html 2021-01-15 Daily 1 ... 1 http://k.filed.site/12-menit-untuk-selamanya-full-movie-hd.html 2021-01-15 ... http://k.filed.site/Glee-season-finale-full-episode-online.html 2021-01-15 Daily ... s-best-players-reflect-on-the-fathers-who-inspired-them-to-love-the-game.html ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Ishaqzaade Movie Download 720p Kickass.md b/spaces/falterWliame/Face_Mask_Detection/Ishaqzaade Movie Download 720p Kickass.md deleted file mode 100644 index 0c46f4dc3c672386d9836a59b0d1708cbbc606c3..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Ishaqzaade Movie Download 720p Kickass.md +++ /dev/null @@ -1,93 +0,0 @@ -
    -

    Ishaqzaade Movie Download 720p Kickass

    -

    Ishaqzaade is a 2012 Bollywood movie that tells the story of a Hindu man and a Muslim woman who share a forbidden romance while fighting for the political supremacy of their respective families. The movie stars Arjun Kapoor and Parineeti Chopra in their debut roles, along with Gauahar Khan and Anil Rastogi. The movie is directed by Habib Faisal and produced by Yash Raj Films. Ishaqzaade received positive reviews from critics and audiences, and was a commercial success at the box office.

    -

    Ishaqzaade movie download 720p kickass


    DOWNLOADhttps://urlca.com/2uDccz



    -

    Why Should You Watch Ishaqzaade?

    -

    Ishaqzaade is a movie that offers a lot of entertainment and emotion for the viewers. Here are some reasons why you should watch Ishaqzaade:

    -
      -
    • It has a gripping and realistic plot that deals with the sensitive issue of honor killings and communal violence in India.
    • -
    • It has a brilliant performance by the lead actors, who showcase their chemistry, charisma, and talent in their first movie.
    • -
    • It has a catchy and melodious soundtrack composed by Amit Trivedi, with songs like "Ishaqzaade", "Pareshaan", and "Chokra Jawaan".
    • -
    • It has a stunning cinematography and art direction that capture the rustic and colorful atmosphere of Uttar Pradesh.
    • -
    • It has a powerful message of love, courage, and sacrifice that will touch your heart.
    • -
    -

    How to Download Ishaqzaade Movie in 720p Quality?

    -

    If you want to enjoy Ishaqzaade movie in high quality, you can download it in 720p resolution from the internet. 720p is a video format that has a resolution of 1280x720 pixels, which offers a clear and sharp picture quality. To download Ishaqzaade movie in 720p quality, you can follow these steps:

    -
      -
    1. Visit a website that provides Ishaqzaade movie download in 720p quality, such as Ocean of Movies, Online Movies Hindi, DocsLib, or Sway Office.
    2. -
    3. Search for Ishaqzaade movie using the keyword "Ishaqzaade movie download 720p kickass" or similar terms.
    4. -
    5. Select the link that matches your preference and click on it.
    6. -
    7. Wait for the download page to load and click on the download button or link.
    8. -
    9. Choose the location where you want to save the movie file on your computer or device.
    10. -
    11. Wait for the download process to complete and enjoy watching Ishaqzaade movie in 720p quality.
    12. -
    -

    What are the Precautions to Take While Downloading Ishaqzaade Movie?

    -

    While downloading Ishaqzaade movie from the internet, you should take some precautions to avoid any problems or risks. Here are some precautions to take while downloading Ishaqzaade movie:

    -
      -
    • Make sure that you have a reliable and fast internet connection to avoid interruptions or errors during the download process.
    • -
    • Make sure that you have enough storage space on your computer or device to accommodate the movie file size.
    • -
    • Make sure that you have a compatible media player or software to play the movie file format.
    • -
    • Make sure that you have an antivirus or malware protection program to scan the movie file for any viruses or malicious content.
    • -
    • Make sure that you respect the copyright laws and regulations of your country and do not distribute or share the movie file without permission.
    • -
    -

    Conclusion

    -

    Ishaqzaade is a movie that you should not miss if you are a fan of Bollywood movies or romantic dramas. It is a movie that will entertain you, move you, and inspire you with its story, performance, music, visuals, and message. You can download Ishaqzaade movie in 720p quality from various websites using the keyword "Ishaqzaade movie download 720p kickass" or similar terms. However, you should take some precautions while downloading Ishaqzaade movie to ensure a safe and smooth experience. We hope that this article has helped you to know more about Ishaqzaade movie and how to download it in 720p quality.

    -

    What are the Reviews and Ratings of Ishaqzaade Movie?

    -

    Ishaqzaade movie has received positive reviews and ratings from critics and audiences alike. The movie has been praised for its engaging and realistic plot, its impressive and debut performance by the lead actors, its catchy and melodious soundtrack, its stunning and colorful cinematography, and its powerful and touching message. The movie has also been appreciated for its bold and honest portrayal of the sensitive issue of honor killings and communal violence in India.

    -

    Ishaqzaade movie has a rating of 6.5 out of 10 on IMDb, based on 53 votes. The movie has a rating of 3.5 out of 5 on Times of India, based on 16 reviews. The movie has a rating of 4 out of 5 on Bollywood Hungama, based on 9 reviews. The movie has a rating of 3.8 out of 5 on BookMyShow, based on 1,078 ratings.

    -

    -

    What are the Awards and Nominations of Ishaqzaade Movie?

    -

    Ishaqzaade movie has won several awards and nominations for its outstanding achievements in various categories. The movie has won awards for its direction, screenplay, music, editing, sound design, choreography, and performance. The movie has also been nominated for its story, cinematography, art direction, costume design, dialogue, and lyrics. Here are some of the awards and nominations of Ishaqzaade movie:

    -
      -
    • The movie won the National Film Award for Best Popular Film Providing Wholesome Entertainment in 2013.
    • -
    • The movie won the Filmfare Award for Best Male Debut for Arjun Kapoor and Best Female Debut for Parineeti Chopra in 2013.
    • -
    • The movie won the IIFA Award for Best Debut Male for Arjun Kapoor and Best Debut Female for Parineeti Chopra in 2013.
    • -
    • The movie won the Screen Award for Most Promising Newcomer Male for Arjun Kapoor and Most Promising Newcomer Female for Parineeti Chopra in 2013.
    • -
    • The movie won the Zee Cine Award for Best Debut Male for Arjun Kapoor and Best Debut Female for Parineeti Chopra in 2013.
    • -
    • The movie was nominated for the Filmfare Award for Best Film, Best Director, Best Music Director, Best Lyricist, Best Playback Singer Male, Best Playback Singer Female, Best Editing, Best Sound Design, and Best Choreography in 2013.
    • -
    • The movie was nominated for the IIFA Award for Best Film, Best Director, Best Story, Best Screenplay, Best Dialogue, Best Music Director, Best Lyricist, Best Playback Singer Male, Best Playback Singer Female, Best Editing, Best Sound Designing, and Best Choreography in 2013.
    • -
    • The movie was nominated for the Screen Award for Best Film, Best Director, Best Actor Male Popular Choice, Best Actor Female Popular Choice, Best Music Director, Best Lyricist, Best Playback Singer Male Popular Choice, Best Playback Singer Female Popular Choice, Best Cinematography, Best Art Direction, and Best Costume Design in 2013.
    • -
    • The movie was nominated for the Zee Cine Award for Best Film Critics' Choice ,Best Director Critics' Choice ,Best Actor Male Critics' Choice ,Best Actor Female Critics' Choice ,Best Music Director ,Best Lyricist ,Best Playback Singer Male ,Best Playback Singer Female ,Best Editing ,Best Sound Design ,and Best Choreography in 2013.
    • -
    -

    What are the Interesting Facts and Trivia of Ishaqzaade Movie?

    -

    Ishaqzaade movie has some interesting facts and trivia that you may not know. Here are some of them:

    -
      -
    • The movie was originally titled "Tewar" which means attitude in Hindi.
    • -
    • The movie was shot in various locations in Uttar Pradesh, such as Lucknow, Hardoi, and Barabanki.
    • -
    • The movie was inspired by the real-life story of a Hindu boy and a Muslim girl who eloped and were killed by their families in 2010.
    • -
    • The movie marked the debut of Arjun Kapoor, who is the son of producer Boney Kapoor and the nephew of actor Anil Kapoor.
    • -
    • The movie marked the second film of Parineeti Chopra, who is the cousin of actress Priyanka Chopra.
    • -
    • The movie featured a cameo appearance by actor Rishi Kapoor, who played the role of a police officer.
    • -
    • The movie had a special screening for the President of India Pranab Mukherjee at the Rashtrapati Bhavan in 2012.
    • -
    -

    What are the Similar Movies to Ishaqzaade?

    -

    If you liked Ishaqzaade movie, you may also like some similar movies that have the same genre, theme, or style. Here are some of the similar movies to Ishaqzaade:

    -
      -
    • Goliyon Ki Raasleela Ram-Leela: A 2013 movie that tells the story of a Hindu man and a Muslim woman who fall in love amid a violent feud between their families.
    • -
    • Qayamat Se Qayamat Tak: A 1988 movie that tells the story of a Hindu boy and a Muslim girl who elope and face the wrath of their families.
    • -
    • Ishq: A 1997 movie that tells the story of four lovers who defy their parents and society to be together.
    • -
    • Raanjhanaa: A 2013 movie that tells the story of a Hindu boy who falls in love with a Muslim girl and follows her to Delhi.
    • -
    • Bombay: A 1995 movie that tells the story of a Hindu man and a Muslim woman who marry and face communal riots in Mumbai.
    • -
    -

    What are the Benefits of Downloading Ishaqzaade Movie in 720p Quality?

    -

    Downloading Ishaqzaade movie in 720p quality has some benefits that you may enjoy. Here are some of them:

    -
      -
    • You can watch Ishaqzaade movie in high definition quality, which enhances your viewing experience and enjoyment.
    • -
    • You can save Ishaqzaade movie in your computer or device and watch it anytime and anywhere you want.
    • -
    • You can share Ishaqzaade movie with your friends and family and enjoy it together.
    • -
    • You can avoid the hassle of going to the theater or renting a DVD and save your time and money.
    • -
    • You can support the makers of Ishaqzaade movie by downloading it from a legal and authorized source.
    • -
    -

    What are the Risks of Downloading Ishaqzaade Movie in 720p Quality?

    -

    Downloading Ishaqzaade movie in 720p quality also has some risks that you should be aware of. Here are some of them:

    -
      -
    • You may download a fake or corrupted file that may damage your computer or device or contain viruses or malware.
    • -
    • You may download a low-quality or incomplete file that may ruin your viewing experience and satisfaction.
    • -
    • You may download a file that has a different language or subtitle than you expected or wanted.
    • -
    • You may download a file that infringes the copyright laws and regulations of your country and face legal consequences.
    • -
    • You may download a file that harms the makers of Ishaqzaade movie by depriving them of their rightful income and recognition.
    • -
    -

    Conclusion

    -

    Ishaqzaade is a movie that you should watch if you love Bollywood movies or romantic dramas. It is a movie that will entertain you, move you, and inspire you with its story, performance, music, visuals, and message. You can download Ishaqzaade movie in 720p quality from various websites using the keyword "Ishaqzaade movie download 720p kickass" or similar terms. However, you should take some precautions while downloading Ishaqzaade movie to ensure a safe and smooth experience. You should also respect the rights and efforts of the makers of Ishaqzaade movie and support them by downloading it from a legal and authorized source. We hope that this article has helped you to know more about Ishaqzaade movie and how to download it in 720p quality.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/QuarkXPress 9.3 Final Multilingual [ChingLiu] Download Pc.md b/spaces/falterWliame/Face_Mask_Detection/QuarkXPress 9.3 Final Multilingual [ChingLiu] Download Pc.md deleted file mode 100644 index b323c617123f9c6d2fb7287b74145093d3e17318..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/QuarkXPress 9.3 Final Multilingual [ChingLiu] Download Pc.md +++ /dev/null @@ -1,11 +0,0 @@ -

    QuarkXPress 9.3 Final Multilingual [ChingLiu] Download Pc


    DOWNLOAD ✪✪✪ https://urlca.com/2uDdy6



    -
    -... download dbgt final bout pc for free , , winnetou melodie ... nero 8 ultra edition 8.3 .2.1 multilingual serial key free download ... Download Nero 8.0.8.10 Rus + crack - Nero 8 is a set of programs for working with CD / DVD discs. -Download Nero 8.0.8.10 Rus + serial - Nero is a complete set of programs for working with CD / DVD discs. -Download Nero 8.0.8.10 + crack (key) - Nero 8 is a set of programs for working with CD / DVD discs. -Nero 8.0.8.10 + crack. -Download from server (4.1Mb) Download from Cloud. -Nero 8.0.8.10 Eng + 8a78ff9644
    -
    -
    -

    diff --git a/spaces/fatiXbelha/sd/Cyprus Division 1 Apoel Nicosia vs APK Karmotissa Live Score and Highlights.md b/spaces/fatiXbelha/sd/Cyprus Division 1 Apoel Nicosia vs APK Karmotissa Live Score and Highlights.md deleted file mode 100644 index aafbf03eb19e36de353c636dd492956216fd5ebe..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Cyprus Division 1 Apoel Nicosia vs APK Karmotissa Live Score and Highlights.md +++ /dev/null @@ -1,149 +0,0 @@ - -

Apoel Nicosia vs APK Karmiotissa: A preview of the Cyprus Division 1 match and the best 1x2 betting tips

    -

    Introduction

    -

If you are looking for a thrilling and profitable football match to bet on this weekend, you might want to check out the Cyprus Division 1 clash between Apoel Nicosia and APK Karmiotissa. These two teams have different ambitions and expectations for this season, but they both need a win to boost their confidence and morale.

    -

    apoel nicosia - apk karmiotissa cyprus division 1 1x2 - ft (1)


    Download ○○○ https://urllie.com/2uNDCm



    -

Apoel Nicosia are the most successful and popular club in Cyprus, having won 28 league titles, 21 cups, and 14 super cups. They are also the only Cypriot team to reach the quarter-finals of the UEFA Champions League in 2012. They are currently third in the league table with 42 points from 22 games, seven points behind leaders AEL Limassol.

    -

APK Karmiotissa are a relatively young and modest club, founded in 1979. They play their home games at the Pano Polemidia Community Stadium, which has a capacity of only 3,000 spectators. They are currently ninth in the league table with 24 points from 22 games, just six points above the relegation zone.

    -

So what is 1x2 betting? It is a simple and popular way of betting on football matches that involves choosing one of three possible outcomes for a match: home win (1), draw (X), or away win (2). For example, if you bet on 1x2 - ft (1) for this match, you are betting that Apoel Nicosia will win the match at full time, which is the end of the regular 90 minutes plus any added time. If they do, you win your bet. If they don't, you lose your bet.
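To make the arithmetic concrete with purely hypothetical numbers: if a bookmaker priced the home win at decimal odds of 1.60, a 10-unit stake on 1x2 - ft (1) would return 10 × 1.60 = 16 units (a profit of 6 units) if Apoel Nicosia are ahead when the referee ends the 90 minutes plus stoppage time, and nothing after a draw or an away win.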

    -

So why do I think that Apoel Nicosia will win this match at full time? Well, there are several reasons for that, which I will explain in detail in the following sections. But to summarize, I think that Apoel Nicosia have a better team, better form, and better motivation than APK Karmiotissa. I also think that the odds for this bet are quite attractive and offer good value for money.

    -


    -

    Apoel Nicosia: The dominant force in Cypriot football

    -

    Apoel Nicosia are not only the most decorated club in Cyprus, but also one of the most respected and feared teams in Europe. They have regularly participated and performed well in UEFA competitions, reaching the group stage of the Champions League or the Europa League in nine of the last 11 seasons. They have also faced, and sometimes beaten, some of the biggest clubs in the world, such as Real Madrid, Barcelona, Porto, Lyon, and Ajax.

    -

    Apoel Nicosia have a strong and balanced squad, with experienced and talented players in every position. Their captain and star player is Nuno Morais, a Portuguese midfielder who has been with the club since 2007 and has won 19 trophies with them. He is supported by other quality players such as Giorgos Merkis, Tomas De Vincenti, Anuar Tuhami, and Moussa Al Tamari. They play an attractive and effective style of football, based on possession, passing, and pressing.

    -

    Apoel Nicosia have been in good form lately, winning four of their last five matches in all competitions, scoring 12 goals and conceding only three in that span. They have also been dominant at home, winning 10 of their 11 league matches at the GSP Stadium this season, with 28 goals scored and only six conceded. Their clear goal is to win the league title for the 29th time and to qualify for the Champions League next season.

    -

    The only downside for Apoel Nicosia is possible fatigue or distraction from their midweek cup quarter-final against Olympiacos Nicosia. They won that match 3-0 away from home, but it may have cost them some energy and focus ahead of the game against APK Karmiotissa. They also have some injuries and suspensions to deal with, such as Diego Aguirre, Roman Bezjak, and Uros Matic.

    -

    APK Karmiotissa: The underdogs with nothing to lose

    -

    APK Karmiotissa are a comparatively new and modest club in Cyprus, having played in the first division only since 2016. They have never won a major trophy or qualified for a European competition. They have a small budget and a limited fan base, and their small home ground often holds more away fans than home fans.

    -

    APK Karmiotissa have a weak and inconsistent squad, with few players who can make a difference at this level. Their best player is probably Andreas Kyriakou, a Cypriot forward who has scored seven goals in 20 league games this season. He is supported by other average players such as Nikos Englezou, Andreas Stavrou, and Giorgos Malekkides. They play a defensive and cautious style of football, based on counter-attacks, long balls, and set-pieces.

    -

    APK Karmiotissa have been in poor form lately, losing four of their last five matches in all competitions, scoring only three goals and conceding 13 in that span. They have also been dreadful away from home, losing nine of their 11 league matches on the road this season, with only eight goals scored and 25 conceded. They have no realistic chance of challenging for the top spots and may not even avoid the relegation playoffs.

    -

    The only advantage for APK Karmiotissa is that they might carry some extra motivation or confidence from their recent cup quarter-final win against Ethnikos Achna. They won that match 2-1 at home after extra time, but it may also have drained energy and resources that could affect their performance against Apoel Nicosia. They too have injuries and suspensions to deal with, such as Giorgos Vasiliou, Andreas Elia, and Christos Kallis.

    -

    Head-to-head comparison and previous results

    -

    Another factor that supports my prediction of a full-time Apoel Nicosia win is their head-to-head record against APK Karmiotissa. As you can see from the table below, Apoel Nicosia have a clear advantage in the recent meetings, winning four of the last five and scoring 14 goals while conceding only three.

    Date        | Competition       | Result
    12/12/2020  | Cyprus Division 1 | APK Karmiotissa 0-4 Apoel Nicosia
    08/02/2020  | Cyprus Division 1 | Apoel Nicosia 3-0 APK Karmiotissa
    21/09/2019  | Cyprus Division 1 | APK Karmiotissa 1-3 Apoel Nicosia
    18/03/2017  | Cyprus Division 1 | Apoel Nicosia 4-0 APK Karmiotissa
    29/10/2016  | Cyprus Division 1 | APK Karmiotissa 2-0 Apoel Nicosia
    -

    The last time APK Karmiotissa beat Apoel Nicosia was in 2016, when they pulled off a surprise 2-0 win at home. That was an isolated result, however, as Apoel Nicosia have dominated this fixture ever since. In fact, Apoel Nicosia have not conceded a single goal against APK Karmiotissa in their last four meetings, scoring 14 goals in the process, and they have won their last two matches at APK Karmiotissa's stadium by a combined score of 7-0.

    -

    Therefore, based on the head-to-head record and previous results, I think that Apoel Nicosia have a clear edge over APK Karmiotissa and are likely to win this match at full time.

    -

    1x2 betting tips and predictions for Apoel Nicosia vs APK Karmiotissa

    -

    To conclude, I think that Apoel Nicosia will win this match at full time for the following reasons:

    -
      -
    • They have a better team, better form, and better motivation than APK Karmiotissa.
    • They have a strong and balanced squad, with experienced and talented players in every position.
    • They play an attractive and effective style of football, based on possession, passing, and pressing.
    • They have been dominant at home, winning 10 of their 11 league matches at the GSP Stadium this season.
    • They have a clear goal to win the league title for the 29th time and to qualify for the Champions League next season.
    • They have a clear advantage in the head-to-head record, winning four of the last five meetings and scoring 14 goals while conceding only three.
    • They have not conceded a single goal against APK Karmiotissa in their last four meetings, scoring 14 goals in the process.
    • They have won their last two matches at APK Karmiotissa's stadium by a combined score of 7-0.
    -

    Therefore, my prediction for this match is: Apoel Nicosia - APK Karmiotissa Cyprus Division 1 1x2 - ft (1).

    -

    The odds for this bet are around 1.25, which means a profit of $25 for every $100 you stake. I think this is good value, because I believe the actual probability of this outcome is higher than the implied probability of the odds, which is around 80%.
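
    As a quick check of that arithmetic, here is a small illustrative Python sketch (my addition; the function names are invented) that turns decimal odds into the profit on a $100 stake and into the bookmaker's implied probability:

        def profit_on_stake(decimal_odds: float, stake: float = 100.0) -> float:
            """Profit if the bet wins, excluding the returned stake."""
            return stake * (decimal_odds - 1.0)

        def implied_probability(decimal_odds: float) -> float:
            """The bookmaker's implied probability, as a percentage."""
            return 100.0 / decimal_odds

        print(profit_on_stake(1.25))      # 25.0 -> win $25 for every $100 staked
        print(implied_probability(1.25))  # 80.0 -> roughly an 80% implied chance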

    -

    If you are looking for alternative bets for different scenarios, you might want to consider the following options (a short sketch after the list shows how each of them settles for a given final score):

    -
      -
    • If you think that Apoel Nicosia will win by more than one goal, you can bet on Apoel Nicosia -1.5 Asian handicap - ft, which means you are betting that they will win by two or more goals. The odds for this bet are around 1.60, so you can win $60 for every $100 that you bet.
    • If you expect plenty of goals, you can bet on Over 2.5 goals - ft, which means you are betting that the total number of goals scored by both teams will be more than 2.5. The odds for this bet are around 1.50, so you can win $50 for every $100 that you bet.
    • If you think that APK Karmiotissa will put up a fight and score at least once, you can bet on Both teams to score - ft, which means you are betting that each team will score at least one goal. The odds for this bet are around 2.00, so you can win $100 for every $100 that you bet.
    -
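
    Here is the promised sketch (illustrative only, with hypothetical final scores; the function name is made up) showing how each of these alternative markets would settle, assuming Apoel Nicosia are the home team:

        def settle_alternatives(home_goals: int, away_goals: int) -> dict:
            """Settle the three alternative markets above for the home side."""
            return {
                # Home -1.5 Asian handicap: the home team must win by two or more goals.
                "home -1.5 asian handicap": home_goals - away_goals >= 2,
                # Over 2.5 goals: three or more goals in total.
                "over 2.5 goals": home_goals + away_goals > 2.5,
                # Both teams to score: each side scores at least once.
                "both teams to score": home_goals > 0 and away_goals > 0,
            }

        print(settle_alternatives(3, 0))  # handicap wins, over 2.5 wins, both teams to score loses
        print(settle_alternatives(2, 1))  # handicap loses, over 2.5 wins, both teams to score wins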

    Conclusion

    -

    In conclusion, I think that Apoel Nicosia will win this match at full time against APK Karmiotissa, based on their better team, better form, better motivation, and better head-to-head record. I also think that the odds for this bet are quite attractive and offer good value for money. Therefore, my prediction for this match is: Apoel Nicosia - APK Karmiotissa Cyprus Division 1 1x2 - ft (1).

    -

    I hope that you have enjoyed reading this article and that you have found it useful and informative. If you are interested in 1x2 betting or other types of football betting, I suggest that you do some more research and analysis before placing your bets, as there are many factors and variables that can affect the outcome of a match. You should also always gamble responsibly and only bet what you can afford to lose.

    -

    FAQs

    -

    Here are some common or relevant questions that readers might have about this topic or 1x2 betting in general:

    -
      -
    1. What is the difference between 1x2 betting and other types of betting?

      -

      1x2 betting is a simple and popular way of betting on football matches that involves choosing one of three possible outcomes for a match: home win (1), draw (X), or away win (2). Other types of betting include handicap betting, which involves giving or taking points or goals from one team to make the match more balanced; over/under betting, which involves betting on the total number of goals scored by both teams; and correct score betting, which involves betting on the exact score of the match.

    2. How do I calculate the implied probability of the odds?

      -

    The implied probability of the odds is the percentage chance that the bookmaker assigns to a certain outcome. You can calculate it with this formula: implied probability (%) = 100 / decimal odds. For example, if the odds for Apoel Nicosia to win at full time are 1.25, the implied probability is 100 / 1.25 = 80%. This means that the bookmaker thinks Apoel Nicosia have roughly an 80% chance of winning the match at full time.

    3. How do I find the best value for money when betting on 1x2?

      -

    The best value for money when betting on 1x2 comes when the actual probability of an outcome is higher than the implied probability of the odds. This means that you have an edge over the bookmaker and can expect to make a profit in the long run. You can find value by doing your own research and analysis, comparing different bookmakers and odds, and looking for discrepancies or errors in the market (a short sketch at the end of this FAQ section shows the comparison in code).

    4. What are some tips or strategies for 1x2 betting?

      -

      Some tips or strategies for 1x2 betting include:

      -
      • Do your homework and study the teams, players, form, motivation, injuries, suspensions, head-to-head record, and other factors that might affect the outcome of a match.
      • Look for trends and patterns in the results and performances of the teams, such as home/away advantage, scoring/conceding goals, winning/losing streaks, etc.
      • Be flexible and adaptable to the changing circumstances and conditions of a match, such as weather, pitch, referee, substitutions, red cards, etc.
      • Manage your bankroll and budget wisely and responsibly, and only bet what you can afford to lose.
      • Shop around and compare different bookmakers and odds, and look for the best value for money.
      • Have fun and enjoy the thrill and excitement of 1x2 betting, but don't let it affect your emotions or judgment.
    5. Where can I find more information or resources about 1x2 betting?

      -

      There are many websites, blogs, forums, podcasts, videos, books, and magazines that offer more information or resources about 1x2 betting. Some of them are:

      -
      • Bettingexpert: A website that provides tips, guides, reviews, and analysis for 1x2 betting and other types of betting.
      • Oddsportal: A website that compares and displays the odds from different bookmakers for 1x2 betting and other types of betting.
      • Soccerstats: A website that provides statistics, results, tables, and trends useful for 1x2 betting and other types of betting.
      • Betfair: A betting exchange platform where you can bet against other people on 1x2 and other markets.
      • The Football Betting Podcast: A podcast that discusses and predicts the outcomes of football matches using 1x2 betting and other types of betting.
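
    Finally, as a small illustration of the value check described in question 3, here is a Python sketch (my own addition, with made-up numbers): a bet offers value when your own estimated probability beats the bookmaker's implied probability.

        def is_value_bet(decimal_odds: float, estimated_probability: float) -> bool:
            """True if your estimated win probability (0-1) exceeds the implied probability."""
            return estimated_probability > 1.0 / decimal_odds

        def expected_profit(decimal_odds: float, estimated_probability: float, stake: float = 100.0) -> float:
            """Long-run expected profit per bet, if your probability estimate is right."""
            win = estimated_probability * stake * (decimal_odds - 1.0)
            lose = (1.0 - estimated_probability) * stake
            return win - lose

        # Made-up numbers: odds of 1.25 (implied 80%) against a personal estimate of 88%.
        print(is_value_bet(1.25, 0.88))     # True
        print(expected_profit(1.25, 0.88))  # roughly 10.0 -> about +$10 expected per $100 staked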

    \ No newline at end of file diff --git a/spaces/fengmuxi/ChatGpt-Web/app/api/lemur/route.ts b/spaces/fengmuxi/ChatGpt-Web/app/api/lemur/route.ts deleted file mode 100644 index 1d66ddb3313afb41676b0f1c0be9f8b0d020924e..0000000000000000000000000000000000000000 --- a/spaces/fengmuxi/ChatGpt-Web/app/api/lemur/route.ts +++ /dev/null @@ -1,197 +0,0 @@ -// import { createParser } from "eventsource-parser"; -// import { NextRequest, NextResponse } from "next/server"; -// import { auth } from "../auth"; - -// async function createStream(req: NextRequest) { -// const authResult = auth(req); -// if (authResult.error) { -// return authResult.msg; -// } -// const encoder = new TextEncoder(); -// const decoder = new TextDecoder(); - -// const res = await fetch( -// "http://lemurchat.anfans.cn/api/chat/conversation-trial", -// { -// headers: { -// "Content-Type": "application/json", -// }, -// method: "POST", -// body: req.body, -// }, -// ); - -// const stream = new ReadableStream({ -// async start(controller) { -// function onParse(event: any) { -// if (event.type === "event") { -// const data = event.data; -// if (event.id == "1") { -// let text1 = data.slice(data.indexOf("content")); -// const text = text1.slice(12, text1.indexOf("index") - 6); -// const queue = encoder.encode(text); -// controller.enqueue(queue); -// return; -// } -// // https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream -// try { -// const json = JSON.parse(data); -// // console.log(data.indexOf("content")) -// if (data.indexOf("content") == -1) { -// controller.close(); -// return; -// } -// // console.log(event.data) -// const text = JSON.parse(json.data.slice(5)).choices[0].delta -// .content; -// const queue = encoder.encode(text); -// controller.enqueue(queue); -// } catch (e) { -// controller.error(e); -// } -// } -// } - -// const parser = createParser(onParse); -// for await (const chunk of res.body as any) { -// parser.feed(decoder.decode(chunk)); -// } -// }, -// }); -// return stream; -// } - -// export async function POST(req: NextRequest) { -// try { -// const authResult = auth(req); -// if (authResult.error) { -// return NextResponse.json(authResult, { -// status: 401, -// }); -// } -// const stream = await createStream(req); -// return new Response(stream); -// } catch (error) { -// console.error("[Chat Stream]", error); -// } -// } - -// export const config = { -// runtime: "edge", -// }; - - - -import { createParser } from "eventsource-parser"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../auth"; -import { requestLemur} from "../common"; - -async function createStream(res: Response) { - const encoder = new TextEncoder(); - const decoder = new TextDecoder(); - - const stream = new ReadableStream({ - async start(controller) { - function onParse(event: any) { - if (event.type === "event") { - const data = event.data; - // console.log(data) - if (event.id == "1") { - let text1 = data.slice(data.indexOf("content")); - const text = text1.slice(text1.indexOf("data")+6,text1.indexOf("code")-7); - // console.log("123"+text.replaceAll('\\','')) - const queue = encoder.encode(JSON.parse(text.replaceAll('\\','')).choices[0].delta.content); - controller.enqueue(queue); - return; - } - try { - const json = JSON.parse(data); - if (data.indexOf("content") == -1||data.origin=="lemur") { - controller.close(); - return; - } - var str=json.data.split("data:") - let text="" - for(let i=1;i -

    Magic Piano Tiles 3: A Fun and Challenging Music Game

    -

    Do you love music and want to play your favorite songs on the piano? Do you enjoy testing your reflexes and rhythm skills? Do you want to have fun and relax with a simple and addictive game? If you answered yes to any of these questions, then you should try Magic Piano Tiles 3, a popular music game that will keep you entertained for hours.

    -

    What is Magic Piano Tiles 3?

    -

    Magic Piano Tiles 3 is a music game that challenges you to tap the black tiles on the screen while avoiding the white tiles. The game plays different songs from various genres, such as pop, rap, EDM, jazz, instrumental, and more. You can choose from over 1000 songs to play, or create your own playlist. The game also has different modes, such as endless mode, battle mode, band mode, and more. You can play solo or with other players online.

    -




    -

    The gameplay and rules of Magic Piano Tiles 3

    -

    The gameplay of Magic Piano Tiles 3 is simple but addictive. You just need to tap the black tiles that appear on the screen in sync with the music. The black tiles represent the notes of the song, and you need to tap them at the right time to play the song correctly. If you miss a black tile or tap a white tile, you will lose the game. The game will speed up as you progress, so you need to be fast and accurate.

    -

    The features and benefits of Magic Piano Tiles 3

    -

    Magic Piano Tiles 3 has many features and benefits that make it a great music game. Some of them are:

    -
      -
    • It has a large collection of songs from different genres and artists. You can find your favorite songs or discover new ones.
    • It has high-quality graphics and sound effects that make the game more realistic and immersive.
    • It has various modes that offer different challenges and experiences. You can play endless mode, battle mode, band mode, and more.
    • It has a social aspect that allows you to connect with other players online. You can chat, compete, or cooperate with them.
    • It has a VIP subscription that gives you access to exclusive benefits, such as no ads, unlimited songs, free revives, offline mode, and more.
    -

    How to play Magic Piano Tiles 3 online or offline?

    -

    Magic Piano Tiles 3 is available for both online and offline play. You can play it on your phone, tablet, laptop, or TV. Here are the steps to download and install Magic Piano Tiles 3 on different devices:

    -

    The steps to download and install Magic Piano Tiles 3 on different devices

    Device: Phone or tablet (Android)
    1. Go to the Google Play Store and search for Magic Tiles 3.
    2. Tap on Install and wait for the download to finish.
    3. Open the app and enjoy playing.

    Device: Phone or tablet (iOS)
    1. Go to the App Store and search for Magic Tiles 3: Piano Game.
    2. Tap on Get and wait for the download to finish.
    3. Open the app and enjoy playing.

    Device: Laptop (Windows or Mac)
    1. Go to the official website of Magic Tiles 3 and click on Download.
    2. Choose your operating system (Windows or Mac) and follow the instructions to install the game.
    3. Open the game and enjoy playing.

    Device: TV (Android TV or Fire TV)
    1. Go to the app store of your TV and search for Magic Tiles 3.
    2. Download and install the app on your TV.
    3. Open the app and enjoy playing.
    -

    The tips and tricks to improve your skills and score in Magic Piano Tiles 3

    -

    If you want to master Magic Piano Tiles 3 and get high scores, you need to practice and follow some tips and tricks. Here are some of them:

    -
      -
    • Use your fingers instead of your thumbs to tap the tiles. This will give you more speed and accuracy.
    • Focus on the rhythm and timing of the music. Try to tap the tiles in sync with the beat and melody.
    • Don't look at the score or the time. This will distract you and make you nervous. Just focus on the tiles and the music.
    • Don't panic if you miss a tile or tap a wrong tile. Just keep calm and continue playing. You can use revives or watch ads to resume the game.
    • Play different songs and modes to challenge yourself and learn new skills. You can also play with other players online to compare your performance and get feedback.
    -

    Why should you play Magic Piano Tiles 3?

    -

    Magic Piano Tiles 3 is not only a fun and challenging game, but also a beneficial one. Playing Magic Piano Tiles 3 can have many advantages for your brain and mood. Here are some of them:

    -

    The advantages of playing Magic Piano Tiles 3 for your brain and mood

    -
      -
    • It improves your cognitive abilities, such as memory, attention, concentration, coordination, and reaction time. Playing Magic Piano Tiles 3 requires you to process multiple stimuli at once, which exercises your brain.
    • It boosts your creativity and musical intelligence. Playing Magic Piano Tiles 3 exposes you to different genres and styles of music, which stimulates your imagination and musical sense.
    • It reduces your stress and anxiety levels. Playing Magic Piano Tiles 3 can help you relax and calm down, as music has a soothing effect on your mind and body.
    • It increases your happiness and satisfaction. Playing Magic Piano Tiles 3 can make you feel happy and proud, as you achieve your goals and play your favorite songs.
    -

    The testimonials and reviews of Magic Piano Tiles 3 from other players

    -

    Magic Piano Tiles 3 has received many positive testimonials and reviews from other players who have enjoyed playing it. Here are some of them:

    -
    "This game is amazing! I love playing it every day. It has so many songs to choose from, and it's very easy to play. It also helps me relax and improve my mood. I highly recommend it!" - Anna, 25
    -
    "I'm addicted to this game! It's so fun and challenging. It tests my reflexes and rhythm skills, and it makes me feel like a real pianist. It also has great graphics and sound effects. It's the best music game ever!" - Leo, 18
    -
    "This game is awesome! I play it with my friends online, and we have a blast. We compete, chat, and cooperate with each other. It's very social and interactive. It also has different modes that offer different experiences. It's a must-have game!" - Mia, 22
    -

    Conclusion

    -

    Magic Piano Tiles 3 is a fun and challenging music game that will keep you entertained for hours. You can play different songs from various genres, choose from different modes, connect with other players online, and enjoy high-quality graphics and sound effects. You can also improve your cognitive abilities, creativity, musical intelligence, mood, and happiness by playing Magic Piano Tiles 3. So what are you waiting for? Download Magic Piano Tiles 3 today and start playing!

    -

    FAQs

    -

    What is the difference between Magic Piano Tiles 3 and other piano games?

    -

    Magic Piano Tiles 3 is different from other piano games in several ways. Some of them are:

    -

    -
      -
    • Magic Piano Tiles 3 has more songs than other piano games. You can find over 1000 songs from different genres and artists, or create your own playlist.
    • Magic Piano Tiles 3 has more modes than other piano games. You can play endless mode, battle mode, band mode, and more.
    • Magic Piano Tiles 3 has more social features than other piano games. You can chat, compete, or cooperate with other players online.
    • Magic Piano Tiles 3 has better graphics and sound effects than other piano games. You can enjoy realistic and immersive visuals and sounds.
    -

    How can I get more coins and diamonds in Magic Piano Tiles 3?

    -

    Coins and diamonds are the currencies in Magic Piano Tiles 3. You can use them to unlock new songs, modes, themes, and more. You can get more coins and diamonds in Magic Piano Tiles 3 by:

    -
      -
    • Playing the game regularly and completing daily tasks and achievements.
    • Watching ads or completing offers in the game.
    • Inviting your friends to play the game and sharing your results on social media.
    • Purchasing them with real money or subscribing to the VIP membership.
    -

    How can I change the theme or the background of Magic Piano Tiles 3?

    -

    You can change the theme or the background of Magic Piano Tiles 3 by:

    -
      -
    • Tapping on the settings icon on the main screen of the game.
    • Tapping on the theme option and choosing from the available themes.
    • Tapping on the background option and choosing from the available backgrounds.
    -

    How can I create my own playlist in Magic Piano Tiles 3?

    -

    You can create your own playlist in Magic Piano Tiles 3 by:

    -
      -
    • Tapping on the music icon on the main screen of the game.
    • Tapping on the playlist option and choosing from the available songs.
    • Tapping on the add icon to add songs to your playlist.
    • Tapping on the play icon to play your playlist.
    -

    How can I contact the support team of Magic Piano Tiles 3?

    -

    You can contact the support team of Magic Piano Tiles 3 by:

    -
      -
    • Tapping on the settings icon on the main screen of the game.
    • Tapping on the feedback option and filling out the form with your name, email, and message.
    • Tapping on the send icon to submit your feedback.

    \ No newline at end of file diff --git a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/open_clip/transform.py b/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/open_clip/transform.py deleted file mode 100644 index 77aaa722c4a5544ac50de6df35d3e922f63b111d..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/open_clip/transform.py +++ /dev/null @@ -1,45 +0,0 @@ -from torchvision.transforms import ( - Normalize, - Compose, - RandomResizedCrop, - InterpolationMode, - ToTensor, - Resize, - CenterCrop, -) - - -def _convert_to_rgb(image): - return image.convert("RGB") - - -def image_transform( - image_size: int, - is_train: bool, - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), -): - normalize = Normalize(mean=mean, std=std) - if is_train: - return Compose( - [ - RandomResizedCrop( - image_size, - scale=(0.9, 1.0), - interpolation=InterpolationMode.BICUBIC, - ), - _convert_to_rgb, - ToTensor(), - normalize, - ] - ) - else: - return Compose( - [ - Resize(image_size, interpolation=InterpolationMode.BICUBIC), - CenterCrop(image_size), - _convert_to_rgb, - ToTensor(), - normalize, - ] - ) diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/body-parser/lib/read.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/body-parser/lib/read.js deleted file mode 100644 index fce6283f50961e68c2f576031ed5e3d4fdc39984..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/body-parser/lib/read.js +++ /dev/null @@ -1,205 +0,0 @@ -/*! - * body-parser - * Copyright(c) 2014-2015 Douglas Christopher Wilson - * MIT Licensed - */ - -'use strict' - -/** - * Module dependencies. - * @private - */ - -var createError = require('http-errors') -var destroy = require('destroy') -var getBody = require('raw-body') -var iconv = require('iconv-lite') -var onFinished = require('on-finished') -var unpipe = require('unpipe') -var zlib = require('zlib') - -/** - * Module exports. - */ - -module.exports = read - -/** - * Read a request into a buffer and parse. - * - * @param {object} req - * @param {object} res - * @param {function} next - * @param {function} parse - * @param {function} debug - * @param {object} options - * @private - */ - -function read (req, res, next, parse, debug, options) { - var length - var opts = options - var stream - - // flag as parsed - req._body = true - - // read options - var encoding = opts.encoding !== null - ? opts.encoding - : null - var verify = opts.verify - - try { - // get the content stream - stream = contentstream(req, debug, opts.inflate) - length = stream.length - stream.length = undefined - } catch (err) { - return next(err) - } - - // set raw-body options - opts.length = length - opts.encoding = verify - ? 
null - : encoding - - // assert charset is supported - if (opts.encoding === null && encoding !== null && !iconv.encodingExists(encoding)) { - return next(createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', { - charset: encoding.toLowerCase(), - type: 'charset.unsupported' - })) - } - - // read body - debug('read body') - getBody(stream, opts, function (error, body) { - if (error) { - var _error - - if (error.type === 'encoding.unsupported') { - // echo back charset - _error = createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', { - charset: encoding.toLowerCase(), - type: 'charset.unsupported' - }) - } else { - // set status code on error - _error = createError(400, error) - } - - // unpipe from stream and destroy - if (stream !== req) { - unpipe(req) - destroy(stream, true) - } - - // read off entire request - dump(req, function onfinished () { - next(createError(400, _error)) - }) - return - } - - // verify - if (verify) { - try { - debug('verify body') - verify(req, res, body, encoding) - } catch (err) { - next(createError(403, err, { - body: body, - type: err.type || 'entity.verify.failed' - })) - return - } - } - - // parse - var str = body - try { - debug('parse body') - str = typeof body !== 'string' && encoding !== null - ? iconv.decode(body, encoding) - : body - req.body = parse(str) - } catch (err) { - next(createError(400, err, { - body: str, - type: err.type || 'entity.parse.failed' - })) - return - } - - next() - }) -} - -/** - * Get the content stream of the request. - * - * @param {object} req - * @param {function} debug - * @param {boolean} [inflate=true] - * @return {object} - * @api private - */ - -function contentstream (req, debug, inflate) { - var encoding = (req.headers['content-encoding'] || 'identity').toLowerCase() - var length = req.headers['content-length'] - var stream - - debug('content-encoding "%s"', encoding) - - if (inflate === false && encoding !== 'identity') { - throw createError(415, 'content encoding unsupported', { - encoding: encoding, - type: 'encoding.unsupported' - }) - } - - switch (encoding) { - case 'deflate': - stream = zlib.createInflate() - debug('inflate body') - req.pipe(stream) - break - case 'gzip': - stream = zlib.createGunzip() - debug('gunzip body') - req.pipe(stream) - break - case 'identity': - stream = req - stream.length = length - break - default: - throw createError(415, 'unsupported content encoding "' + encoding + '"', { - encoding: encoding, - type: 'encoding.unsupported' - }) - } - - return stream -} - -/** - * Dump the contents of a request. 
- * - * @param {object} req - * @param {function} callback - * @api private - */ - -function dump (req, callback) { - if (onFinished.isFinished(req)) { - callback(null) - } else { - onFinished(req, callback) - req.resume() - } -} diff --git a/spaces/fiz123321/dumbcutie/Dockerfile b/spaces/fiz123321/dumbcutie/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/fiz123321/dumbcutie/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git "a/spaces/fkhuggingme/gpt-academic/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/spaces/fkhuggingme/gpt-academic/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" deleted file mode 100644 index 6a7d118b4439605db6e10b9a416a2e725b99a672..0000000000000000000000000000000000000000 --- "a/spaces/fkhuggingme/gpt-academic/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" +++ /dev/null @@ -1,102 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping -import requests -from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info - -def google(query, proxies): - query = query # 在此处替换您要搜索的关键词 - url = f"https://www.google.com/search?q={query}" - headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'} - response = requests.get(url, headers=headers, proxies=proxies) - soup = BeautifulSoup(response.content, 'html.parser') - results = [] - for g in soup.find_all('div', class_='g'): - anchors = g.find_all('a') - if anchors: - link = anchors[0]['href'] - if link.startswith('/url?q='): - link = link[7:] - if not link.startswith('http'): - continue - title = g.find('h3').text - item = {'title': title, 'link': link} - results.append(item) - - for r in results: - print(r['link']) - return results - -def scrape_text(url, proxies) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', - 'Content-Type': 'text/plain', - } - try: - response = requests.get(url, headers=headers, proxies=proxies, timeout=8) - if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding - except: - return "无法连接到该网页" - soup = BeautifulSoup(response.text, "html.parser") - for script in soup(["script", "style"]): - script.extract() - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - return text - -@CatchException -def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - 
chatbot.append((f"请结合互联网信息回答以下问题:{txt}", - "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第1步:爬取搜索引擎的结果 > ------------- - from toolbox import get_conf - proxies, = get_conf('proxies') - urls = google(txt, proxies) - history = [] - - # ------------- < 第2步:依次访问网页 > ------------- - max_search_result = 5 # 最多收纳多少个网页的结果 - for index, url in enumerate(urls[:max_search_result]): - res = scrape_text(url['link'], proxies) - history.extend([f"第{index}份搜索结果:", res]) - chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第3步:ChatGPT综合 > ------------- - i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}" - i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token - inputs=i_say, - history=history, - max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4 - ) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - diff --git a/spaces/flax-community/SentenceSimplifier/About/datasets.md b/spaces/flax-community/SentenceSimplifier/About/datasets.md deleted file mode 100644 index 9ac12dfd2e2fb406500148410b55495f4c6b75ba..0000000000000000000000000000000000000000 --- a/spaces/flax-community/SentenceSimplifier/About/datasets.md +++ /dev/null @@ -1,3 +0,0 @@ -## Datasets used -* [Wiki Split](https://research.google/tools/datasets/wiki-split/) -* [Web Split](https://github.com/shashiongithub/Split-and-Rephrase) \ No newline at end of file diff --git a/spaces/florim/MedGPT/autogpt/agent/agent.py b/spaces/florim/MedGPT/autogpt/agent/agent.py deleted file mode 100644 index ee7885f8844022597321fa6b492430ec34c0d6b9..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/agent/agent.py +++ /dev/null @@ -1,197 +0,0 @@ -from colorama import Fore, Style - -from autogpt.app import execute_command, get_command -from autogpt.chat import chat_with_ai, create_chat_message -from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques -from autogpt.json_utils.utilities import validate_json -from autogpt.logs import logger, print_assistant_thoughts -from autogpt.speech import say_text -from autogpt.spinner import Spinner -from autogpt.utils import clean_input - - -class Agent: - """Agent class for interacting with Auto-GPT. - - Attributes: - ai_name: The name of the agent. - memory: The memory object to use. - full_message_history: The full message history. - next_action_count: The number of actions to execute. - system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully. - Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals. - - triggering_prompt: The last sentence the AI will see before answering. 
For Auto-GPT, this prompt is: - Determine which next command to use, and respond using the format specified above: - The triggering prompt is not part of the system prompt because between the system prompt and the triggering - prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve. - SYSTEM PROMPT - CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant) - TRIGGERING PROMPT - - The triggering prompt reminds the AI about its short term meta task (defining the next task) - """ - - def __init__( - self, - ai_name, - memory, - full_message_history, - next_action_count, - system_prompt, - triggering_prompt, - ): - self.ai_name = ai_name - self.memory = memory - self.full_message_history = full_message_history - self.next_action_count = next_action_count - self.system_prompt = system_prompt - self.triggering_prompt = triggering_prompt - - def start_interaction_loop(self): - # Interaction Loop - cfg = Config() - loop_count = 0 - command_name = None - arguments = None - user_input = "" - - while True: - # Discontinue if continuous limit is reached - loop_count += 1 - if ( - cfg.continuous_mode - and cfg.continuous_limit > 0 - and loop_count > cfg.continuous_limit - ): - logger.typewriter_log( - "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}" - ) - break - - # Send message to AI, get response - with Spinner("Thinking... "): - assistant_reply = chat_with_ai( - self.system_prompt, - self.triggering_prompt, - self.full_message_history, - self.memory, - cfg.fast_token_limit, - ) # TODO: This hardcodes the model to use GPT3.5. Make this an argument - - assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply) - - # Print Assistant thoughts - if assistant_reply_json != {}: - validate_json(assistant_reply_json, "llm_response_format_1") - # Get command name and arguments - try: - print_assistant_thoughts(self.ai_name, assistant_reply_json) - command_name, arguments = get_command(assistant_reply_json) - # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"] - if cfg.speak_mode: - say_text(f"I want to execute {command_name}") - except Exception as e: - logger.error("Error: \n", str(e)) - - if not cfg.continuous_mode and self.next_action_count == 0: - ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### - # Get key press: Prompt the user to press enter to continue or escape - # to exit - logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} " - f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", - ) - print( - "Enter 'y' to authorise command, 'y -N' to run N continuous " - "commands, 'n' to exit program, or enter feedback for " - f"{self.ai_name}...", - flush=True, - ) - while True: - console_input = clean_input( - Fore.MAGENTA + "Input:" + Style.RESET_ALL - ) - if console_input.lower().strip() == "y": - user_input = "GENERATE NEXT COMMAND JSON" - break - elif console_input.lower().strip() == "": - print("Invalid input format.") - continue - elif console_input.lower().startswith("y -"): - try: - self.next_action_count = abs( - int(console_input.split(" ")[1]) - ) - user_input = "GENERATE NEXT COMMAND JSON" - except ValueError: - print( - "Invalid input format. Please enter 'y -n' where n is" - " the number of continuous tasks." 
- ) - continue - break - elif console_input.lower() == "n": - user_input = "EXIT" - break - else: - user_input = console_input - command_name = "human_feedback" - break - - if user_input == "GENERATE NEXT COMMAND JSON": - logger.typewriter_log( - "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", - Fore.MAGENTA, - "", - ) - elif user_input == "EXIT": - print("Exiting...", flush=True) - break - else: - # Print command - logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}" - f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", - ) - - # Execute command - if command_name is not None and command_name.lower().startswith("error"): - result = ( - f"Command {command_name} threw the following error: {arguments}" - ) - elif command_name == "human_feedback": - result = f"Human feedback: {user_input}" - else: - result = ( - f"Command {command_name} returned: " - f"{execute_command(command_name, arguments)}" - ) - if self.next_action_count > 0: - self.next_action_count -= 1 - - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " - f"\nResult: {result} " - f"\nHuman Feedback: {user_input} " - ) - - self.memory.add(memory_to_add) - - # Check if there's a result from the command append it to the message - # history - if result is not None: - self.full_message_history.append(create_chat_message("system", result)) - logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) - else: - self.full_message_history.append( - create_chat_message("system", "Unable to execute command") - ) - logger.typewriter_log( - "SYSTEM: ", Fore.YELLOW, "Unable to execute command" - ) diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/cointhief.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/cointhief.py deleted file mode 100644 index 271b897e7c688976e05c36f9cc19f3be4b63a886..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/backup_envs/cointhief.py +++ /dev/null @@ -1,431 +0,0 @@ -from gym_minigrid.minigrid import * -from gym_minigrid.register import register -import time -from collections import deque - - -class Thief(NPC): - """ - A dancing NPC that the agent has to copy - NPC executes a sequence of movement and utterances - """ - - def __init__(self, color, name, env, thief_pos, hidden_npc=False, tag_visible_coins=False, view_size=5, look_around=True): - super().__init__(color) - self.name = name - self.npc_type = 0 - self.env = env - self.cur_pos = thief_pos - self.init_pos = thief_pos - self.view_size = view_size - self.npc_dir = self._look_at_agent() # Thief stares at its victim - self.init_dir = self.npc_dir - self.actions = self.env.possible_actions - self.tag_visible_coins = tag_visible_coins - - self.nb_given_coins = None - self.look_around = look_around - if self.look_around: # randomly select in which direction NPC is looking around - if np.random.random() > 0.5: # will look left - self.look = self.rotate_left - self.look_back = self.rotate_right - else: # will look right - self.look = self.rotate_right - self.look_back = self.rotate_left - - self.nb_seen_coins = self._count_coins() # This is how much coins Thief wants, at least - self.add_npc_direction = True - self.nb_steps = 0 - self.hidden_npc = hidden_npc - - def step(self, agent_action, agent_utterance): - agent_disobeyed = False - agent_gave_coins = False - utterance = None - - if self.nb_steps == 0: - utterance = "Freeze! Give me all the coins you have!" 
- - if self.nb_steps >= 0 and self.look_around: - if self.npc_dir == self.init_dir: # start to look around - self.look() - else: # resume looking to agent - self.look_back() - - if not(agent_utterance is None): - self.nb_given_coins = int(agent_utterance[-2]) - - if self.nb_given_coins >= self.nb_seen_coins: - agent_gave_coins = True - else: # agent gave not enough coins - agent_disobeyed = True - - # The thief forbids the agent to move, apart from looking around (rotating) - if not (np.isnan(agent_action) or agent_action == self.actions.left or agent_action == self.actions.right): - agent_disobeyed = True - - self.nb_steps += 1 - return agent_disobeyed, agent_gave_coins, utterance - - def get_status_str(self): - return "thief sees: {} \n agent gives: {}".format(self.nb_seen_coins, self.nb_given_coins) - - def _count_coins(self): - # get seen coins - coins_pos = self.get_pos_visible_coins() - - if self.look_around: - self.look() - # add coins visible from this new direction - coins_pos += self.get_pos_visible_coins() - # remove coins that we already saw - if len(coins_pos) > 0: - coins_pos = np.unique(coins_pos, axis=0).tolist() - self.look_back() - - return len(coins_pos) - - def _look_at_agent(self): - npc_dir = None - ax, ay = self.env.agent_pos - tx, ty = self.cur_pos - delta_x, delta_y = ax - tx, ay - ty - if delta_x == 1: - npc_dir = 0 - elif delta_x == -1: - npc_dir = 2 - elif delta_y == 1: - npc_dir = 1 - elif delta_y == -1: - npc_dir = 3 - else: - raise NotImplementedError - - return npc_dir - - def gen_npc_obs_grid(self): - """ - Generate the sub-grid observed by the npc. - This method also outputs a visibility mask telling us which grid - cells the npc can actually see. - """ - view_size = self.view_size - - topX, topY, botX, botY = self.env.get_view_exts(dir=self.npc_dir, view_size=view_size, pos=self.cur_pos) - - grid = self.env.grid.slice(topX, topY, view_size, view_size) - - for i in range(self.npc_dir + 1): - grid = grid.rotate_left() - - # Process occluders and visibility - # Note that this incurs some performance cost - if not self.env.see_through_walls: - vis_mask = grid.process_vis(agent_pos=(view_size // 2, view_size - 1)) - else: - vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool) - - # Make it so the agent sees what it's carrying - # We do this by placing the carried object at the agent's position - # in the agent's partially observable view - # agent_pos = grid.width // 2, grid.height - 1 - # if self.carrying: - # grid.set(*agent_pos, self.carrying) - # else: - # grid.set(*agent_pos, None) - - return grid, vis_mask - - def get_pos_visible_coins(self): - """ - Generate the npc's view (partially observable, low-resolution encoding) - return the list of unique visible coins - """ - - grid, vis_mask = self.gen_npc_obs_grid() - - coins_pos = [] - - for obj in grid.grid: - if isinstance(obj, Ball): - coins_pos.append(obj.cur_pos) - if self.tag_visible_coins: - obj.tag() - - return coins_pos - - def can_overlap(self): - # If the NPC is hidden, agent can overlap on it - return self.hidden_npc - - -class CoinThiefGrammar(object): - - templates = ["Here is"] - things = ["0","1","2","3","4","5","6"] - - grammar_action_space = spaces.MultiDiscrete([len(templates), len(things)]) - - @classmethod - def construct_utterance(cls, action): - return cls.templates[int(action[0])] + " " + cls.things[int(action[1])] + " " - - @classmethod - def random_utterance(cls): - return np.random.choice(cls.templates) + " " + np.random.choice(cls.things) + " " - - -class 
ThiefActions(IntEnum): - # Turn left, turn right, move forward - left = 0 - right = 1 - forward = 2 - - -class CoinThiefEnv(MultiModalMiniGridEnv): - """ - Environment in which the agent is instructed to go to a given object - named using an English text string - """ - - def __init__( - self, - size=5, - hear_yourself=False, - diminished_reward=True, - step_penalty=False, - hidden_npc=False, - max_steps=20, - full_obs=False, - few_actions=False, - tag_visible_coins=False, - nb_coins=6, - npc_view_size=5, - npc_look_around=True - - ): - assert size >= 5 - self.empty_symbol = "NA \n" - self.hear_yourself = hear_yourself - self.diminished_reward = diminished_reward - self.step_penalty = step_penalty - self.hidden_npc = hidden_npc - self.few_actions = few_actions - self.possible_actions = ThiefActions if self.few_actions else MiniGridEnv.Actions - self.nb_coins = nb_coins - self.tag_visible_coins = tag_visible_coins - self.npc_view_size = npc_view_size - self.npc_look_around = npc_look_around - if max_steps is None: - max_steps = 5*size**2 - - super().__init__( - grid_size=size, - max_steps=max_steps, - # Set this to True for maximum speed - see_through_walls=True, - full_obs=full_obs, - actions=MiniGridEnv.Actions, - action_space=spaces.MultiDiscrete([ - len(self.possible_actions), - *CoinThiefGrammar.grammar_action_space.nvec - ]), - add_npc_direction=True - ) - - print({ - "size": size, - "hear_yourself": hear_yourself, - "diminished_reward": diminished_reward, - "step_penalty": step_penalty, - }) - - def _gen_grid(self, width, height): - # Create the grid - self.grid = Grid(width, height, nb_obj_dims=4) - - # Randomly vary the room width and height - # width = self._rand_int(5, width+1) - # height = self._rand_int(5, height+1) - - # Generate the surrounding walls - self.grid.wall_rect(0, 0, width, height) - - # Generate the surrounding walls - self.grid.wall_rect(0, 0, width, height) - - # Randomize the agent's start position and orientation - self.place_agent(size=(width, height)) - - # Get possible near-agent positions, and place thief in one of them - ax, ay = self.agent_pos - near_agent_pos = [[ax, ay + 1], [ax, ay - 1], [ax - 1, ay], [ax + 1, ay]] - # get empty cells positions - available_pos = [] - for p in near_agent_pos: - if self.grid.get(*p) is None: - available_pos.append(p) - thief_pos = self._rand_elem(available_pos) - - # Add randomly placed coins - # Types and colors of objects we can generate - types = ['ball'] - objs = [] - objPos = [] - - # Until we have generated all the objects - while len(objs) < self.nb_coins: - objType = self._rand_elem(types) - objColor = 'yellow' - - if objType == 'ball': - obj = Ball(objColor) - else: - raise NotImplementedError - - pos = self.place_obj(obj, reject_fn=lambda env,pos: pos.tolist() == thief_pos) - objs.append((objType, objColor)) - objPos.append(pos) - - # Set a randomly coloured Thief NPC next to the agent - color = self._rand_elem(COLOR_NAMES) - - self.thief = Thief(color, "Eve", self, thief_pos, - hidden_npc=self.hidden_npc, - tag_visible_coins=self.tag_visible_coins, - view_size=self.npc_view_size, - look_around=self.npc_look_around) - - self.grid.set(*thief_pos, self.thief) - - # Generate the mission string - self.mission = 'save as much coins as possible' - - # Dummy beginning string - self.beginning_string = "This is what you hear. 
\n" - self.utterance = self.beginning_string - - # utterance appended at the end of each step - self.utterance_history = "" - - # used for rendering - self.conversation = self.utterance - self.outcome_info = None - - def step(self, action): - p_action = action[0] if np.isnan(action[0]) else int(action[0]) - if len(action) == 1: # agent cannot speak - utterance_action = [np.nan, np.nan] - else: - utterance_action = action[1:] - - obs, reward, done, info = super().step(p_action) - - # assert all nan or neither nan - assert len(set(np.isnan(utterance_action))) == 1 - speak_flag = not all(np.isnan(utterance_action)) - - if speak_flag: - utterance = CoinThiefGrammar.construct_utterance(utterance_action) - self.conversation += "{}: {} \n".format("Agent", utterance) - - # Don't let the agent open any doors - if not self.few_actions and p_action == self.actions.toggle: - done = True - - if not self.few_actions and p_action == self.actions.done: - done = True - - # npc's turn - agent_disobeyed, agent_gave_coins, npc_utterance = self.thief.step(p_action, utterance if speak_flag else None) - - if self.hidden_npc: - npc_utterance = None - - if npc_utterance: - self.utterance += "{} \n".format(npc_utterance) - self.conversation += "{}: {} \n".format(self.thief.name, npc_utterance) - - if agent_disobeyed: - done = True - - if agent_gave_coins: - done = True - if self.thief.nb_seen_coins == self.thief.nb_given_coins: - reward = self._reward() - self.outcome_info = "SUCCESS: agent got {} reward \n".format(np.round(reward,1)) - - if done and reward == 0: - self.outcome_info = "FAILURE: agent got {} reward \n".format(reward) - - # discount - if self.step_penalty: - reward = reward - 0.01 - - if self.hidden_npc: - # remove npc from agent view - npc_obs_idx = np.argwhere(obs['image'] == 11) - if npc_obs_idx.size != 0: # agent sees npc - obs['image'][npc_obs_idx[0][0], npc_obs_idx[0][1], :] = [1, 0, 0, 0] - - # fill observation with text - self.append_existing_utterance_to_history() - obs = self.add_utterance_to_observation(obs) - self.reset_utterance() - - return obs, reward, done, info - - def _reward(self): - if self.diminished_reward: - return super()._reward() - else: - return 1.0 - - def render(self, *args, **kwargs): - obs = super().render(*args, **kwargs) - - print("conversation:\n", self.conversation) - print("utterance_history:\n", self.utterance_history) - - self.window.clear_text() # erase previous text - - self.window.set_caption(self.conversation) # overwrites super class caption - self.window.ax.set_title(self.thief.get_status_str(), loc="left") - if self.outcome_info: - color = None - if "SUCCESS" in self.outcome_info: - color = "lime" - elif "FAILURE" in self.outcome_info: - color = "red" - self.window.add_text(*(0.01, 0.85, self.outcome_info), - **{'fontsize':15, 'color':color, 'weight':"bold"}) - - self.window.show_img(obs) # re-draw image to add changes to window - - return obs - - -class CoinThief8x8Env(CoinThiefEnv): - def __init__(self, **kwargs): - super().__init__(size=8, **kwargs) - - -class CoinThief6x6Env(CoinThiefEnv): - def __init__(self, **kwargs): - super().__init__(size=6, **kwargs) - - -register( - id='MiniGrid-CoinThief-5x5-v0', - entry_point='gym_minigrid.envs:CoinThiefEnv' -) - -register( - id='MiniGrid-CoinThief-6x6-v0', - entry_point='gym_minigrid.envs:CoinThief6x6Env' -) - -register( - id='MiniGrid-CoinThief-8x8-v0', - entry_point='gym_minigrid.envs:CoinThief8x8Env' -) \ No newline at end of file diff --git a/spaces/fuckyoudeki/AutoGPT/autogpt/chat.py 
b/spaces/fuckyoudeki/AutoGPT/autogpt/chat.py deleted file mode 100644 index 1f6bca96eb216c667656b50f131006b83c681065..0000000000000000000000000000000000000000 --- a/spaces/fuckyoudeki/AutoGPT/autogpt/chat.py +++ /dev/null @@ -1,175 +0,0 @@ -import time - -from openai.error import RateLimitError - -from autogpt import token_counter -from autogpt.config import Config -from autogpt.llm_utils import create_chat_completion -from autogpt.logs import logger - -cfg = Config() - - -def create_chat_message(role, content): - """ - Create a chat message with the given role and content. - - Args: - role (str): The role of the message sender, e.g., "system", "user", or "assistant". - content (str): The content of the message. - - Returns: - dict: A dictionary containing the role and content of the message. - """ - return {"role": role, "content": content} - - -def generate_context(prompt, relevant_memory, full_message_history, model): - current_context = [ - create_chat_message("system", prompt), - create_chat_message( - "system", f"The current time and date is {time.strftime('%c')}" - ), - create_chat_message( - "system", - f"This reminds you of these events from your past:\n{relevant_memory}\n\n", - ), - ] - - # Add messages from the full message history until we reach the token limit - next_message_to_add_index = len(full_message_history) - 1 - insertion_index = len(current_context) - # Count the currently used tokens - current_tokens_used = token_counter.count_message_tokens(current_context, model) - return ( - next_message_to_add_index, - current_tokens_used, - insertion_index, - current_context, - ) - - -# TODO: Change debug from hardcode to argument -def chat_with_ai( - prompt, user_input, full_message_history, permanent_memory, token_limit -): - """Interact with the OpenAI API, sending the prompt, user input, message history, - and permanent memory.""" - while True: - try: - """ - Interact with the OpenAI API, sending the prompt, user input, - message history, and permanent memory. - - Args: - prompt (str): The prompt explaining the rules to the AI. - user_input (str): The input from the user. - full_message_history (list): The list of all messages sent between the - user and the AI. - permanent_memory (Obj): The memory object containing the permanent - memory. - token_limit (int): The maximum number of tokens allowed in the API call. - - Returns: - str: The AI's response. 
- """ - model = cfg.fast_llm_model # TODO: Change model from hardcode to argument - # Reserve 1000 tokens for the response - - logger.debug(f"Token limit: {token_limit}") - send_token_limit = token_limit - 1000 - - relevant_memory = ( - "" - if len(full_message_history) == 0 - else permanent_memory.get_relevant(str(full_message_history[-9:]), 10) - ) - - logger.debug(f"Memory Stats: {permanent_memory.get_stats()}") - - ( - next_message_to_add_index, - current_tokens_used, - insertion_index, - current_context, - ) = generate_context(prompt, relevant_memory, full_message_history, model) - - while current_tokens_used > 2500: - # remove memories until we are under 2500 tokens - relevant_memory = relevant_memory[:-1] - ( - next_message_to_add_index, - current_tokens_used, - insertion_index, - current_context, - ) = generate_context( - prompt, relevant_memory, full_message_history, model - ) - - current_tokens_used += token_counter.count_message_tokens( - [create_chat_message("user", user_input)], model - ) # Account for user input (appended later) - - while next_message_to_add_index >= 0: - # print (f"CURRENT TOKENS USED: {current_tokens_used}") - message_to_add = full_message_history[next_message_to_add_index] - - tokens_to_add = token_counter.count_message_tokens( - [message_to_add], model - ) - if current_tokens_used + tokens_to_add > send_token_limit: - break - - # Add the most recent message to the start of the current context, - # after the two system prompts. - current_context.insert( - insertion_index, full_message_history[next_message_to_add_index] - ) - - # Count the currently used tokens - current_tokens_used += tokens_to_add - - # Move to the next most recent message in the full message history - next_message_to_add_index -= 1 - - # Append user input, the length of this is accounted for above - current_context.extend([create_chat_message("user", user_input)]) - - # Calculate remaining tokens - tokens_remaining = token_limit - current_tokens_used - # assert tokens_remaining >= 0, "Tokens remaining is negative. - # This should never happen, please submit a bug report at - # https://www.github.com/Torantulino/Auto-GPT" - - # Debug print the current context - logger.debug(f"Token limit: {token_limit}") - logger.debug(f"Send Token Count: {current_tokens_used}") - logger.debug(f"Tokens remaining for response: {tokens_remaining}") - logger.debug("------------ CONTEXT SENT TO AI ---------------") - for message in current_context: - # Skip printing the prompt - if message["role"] == "system" and message["content"] == prompt: - continue - logger.debug(f"{message['role'].capitalize()}: {message['content']}") - logger.debug("") - logger.debug("----------- END OF CONTEXT ----------------") - - # TODO: use a model defined elsewhere, so that model can contain - # temperature and other settings we care about - assistant_reply = create_chat_completion( - model=model, - messages=current_context, - max_tokens=tokens_remaining, - ) - - # Update full message history - full_message_history.append(create_chat_message("user", user_input)) - full_message_history.append( - create_chat_message("assistant", assistant_reply) - ) - - return assistant_reply - except RateLimitError: - # TODO: When we switch to langchain, this is built in - print("Error: ", "API Rate Limit Reached. 
Waiting 10 seconds...") - time.sleep(10) diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py deleted file mode 100644 index b8855c107727ecf85b917c890fc8b7f6359238a4..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from annotator.uniformer.mmcv.utils import Registry, is_method_overridden - -HOOKS = Registry('hook') - - -class Hook: - stages = ('before_run', 'before_train_epoch', 'before_train_iter', - 'after_train_iter', 'after_train_epoch', 'before_val_epoch', - 'before_val_iter', 'after_val_iter', 'after_val_epoch', - 'after_run') - - def before_run(self, runner): - pass - - def after_run(self, runner): - pass - - def before_epoch(self, runner): - pass - - def after_epoch(self, runner): - pass - - def before_iter(self, runner): - pass - - def after_iter(self, runner): - pass - - def before_train_epoch(self, runner): - self.before_epoch(runner) - - def before_val_epoch(self, runner): - self.before_epoch(runner) - - def after_train_epoch(self, runner): - self.after_epoch(runner) - - def after_val_epoch(self, runner): - self.after_epoch(runner) - - def before_train_iter(self, runner): - self.before_iter(runner) - - def before_val_iter(self, runner): - self.before_iter(runner) - - def after_train_iter(self, runner): - self.after_iter(runner) - - def after_val_iter(self, runner): - self.after_iter(runner) - - def every_n_epochs(self, runner, n): - return (runner.epoch + 1) % n == 0 if n > 0 else False - - def every_n_inner_iters(self, runner, n): - return (runner.inner_iter + 1) % n == 0 if n > 0 else False - - def every_n_iters(self, runner, n): - return (runner.iter + 1) % n == 0 if n > 0 else False - - def end_of_epoch(self, runner): - return runner.inner_iter + 1 == len(runner.data_loader) - - def is_last_epoch(self, runner): - return runner.epoch + 1 == runner._max_epochs - - def is_last_iter(self, runner): - return runner.iter + 1 == runner._max_iters - - def get_triggered_stages(self): - trigger_stages = set() - for stage in Hook.stages: - if is_method_overridden(stage, Hook, self): - trigger_stages.add(stage) - - # some methods will be triggered in multi stages - # use this dict to map method to stages. 
- method_stages_map = { - 'before_epoch': ['before_train_epoch', 'before_val_epoch'], - 'after_epoch': ['after_train_epoch', 'after_val_epoch'], - 'before_iter': ['before_train_iter', 'before_val_iter'], - 'after_iter': ['after_train_iter', 'after_val_iter'], - } - - for method, map_stages in method_stages_map.items(): - if is_method_overridden(method, Hook, self): - trigger_stages.update(map_stages) - - return [stage for stage in Hook.stages if stage in trigger_stages] diff --git a/spaces/gossminn/fillmorle-app/sftp/metrics/base_f.py b/spaces/gossminn/fillmorle-app/sftp/metrics/base_f.py deleted file mode 100644 index aca78b5605fc7c47b0726e3aafd188eebeb9c1a7..0000000000000000000000000000000000000000 --- a/spaces/gossminn/fillmorle-app/sftp/metrics/base_f.py +++ /dev/null @@ -1,27 +0,0 @@ -from abc import ABC -from typing import * - -from allennlp.training.metrics import Metric - - -class BaseF(Metric, ABC): - def __init__(self, prefix: str): - self.tp = self.fp = self.fn = 0 - self.prefix = prefix - - def reset(self) -> None: - self.tp = self.fp = self.fn = 0 - - def get_metric( - self, reset: bool - ) -> Union[float, Tuple[float, ...], Dict[str, float], Dict[str, List[float]]]: - precision = self.tp * 100 / (self.tp + self.fp) if self.tp > 0 else 0. - recall = self.tp * 100 / (self.tp + self.fn) if self.tp > 0 else 0. - rst = { - f'{self.prefix}_p': precision, - f'{self.prefix}_r': recall, - f'{self.prefix}_f': 2 / (1 / precision + 1 / recall) if self.tp > 0 else 0. - } - if reset: - self.reset() - return rst diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Da Cor A Cor Inexistente Israel Pedrosa Pdf 16 An Analysis of the Authors Work and Influence on Color Research.md b/spaces/gotiQspiryo/whisper-ui/examples/Da Cor A Cor Inexistente Israel Pedrosa Pdf 16 An Analysis of the Authors Work and Influence on Color Research.md deleted file mode 100644 index 11ccbffe93e35111505566533f5e796d580af605..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Da Cor A Cor Inexistente Israel Pedrosa Pdf 16 An Analysis of the Authors Work and Influence on Color Research.md +++ /dev/null @@ -1,6 +0,0 @@ - -

A painter, professor, and researcher, Israel Pedrosa revealed his mastery of the inexistent color in 1967, after 16 years of research. Published in 1977, this veritable historical treatise on color reaches its 10th edition with the standing that only classic works retain.

    -

    Da Cor A Cor Inexistente Israel Pedrosa Pdf 16


    DOWNLOAD 🆓 https://urlgoal.com/2uyMX9



    -

The book covers the development of color theory from Da Vinci onward, through Newton, Goethe, Maxwell, and Chevreul, among other scholars, and deals clearly with topics such as color harmonization, chromatic mutations, and the inexistent color.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Miller And Levine Biology Study Workbook A Answer Key Pdf.md b/spaces/gotiQspiryo/whisper-ui/examples/Miller And Levine Biology Study Workbook A Answer Key Pdf.md deleted file mode 100644 index 7d6fe7453457c5457dede1f0be097acd8d63842e..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Miller And Levine Biology Study Workbook A Answer Key Pdf.md +++ /dev/null @@ -1,20 +0,0 @@ - -

    How to Ace Biology with Miller And Levine Biology Study Workbook A Answer Key Pdf

    - -

    If you are looking for a comprehensive and reliable guide to help you master biology, you might want to check out Miller And Levine Biology Study Workbook A Answer Key Pdf. This workbook is designed to accompany the textbook Biology by Kenneth R. Miller and Joseph S. Levine, which is widely used in high school biology courses. The workbook provides exercises, vocabulary reviews, and practice tests for each chapter of the textbook, as well as answers and explanations for all the questions.

    -

    Miller And Levine Biology Study Workbook A Answer Key Pdf


    Download Zip - https://urlgoal.com/2uyNfO



    - -

    Miller And Levine Biology Study Workbook A Answer Key Pdf covers all the major topics in biology, such as the science of life, the chemistry of life, the biosphere, ecosystems and communities, populations, humans in the biosphere, cell structure and function, photosynthesis, cellular respiration, cell growth and division, genetics, DNA and RNA, protein synthesis, genetic engineering, evolution, the history of life, classification, bacteria and viruses, protists and fungi, plants, animals, human biology, and ecology.

    - -

    By using Miller And Levine Biology Study Workbook A Answer Key Pdf, you can reinforce your understanding of the concepts and skills taught in the textbook, review key terms and definitions, apply your knowledge to real-world scenarios, test your comprehension and critical thinking skills, and prepare for exams and quizzes. You can also access additional resources online at Quizlet, where you can find flashcards, games, and study guides for each chapter of the workbook.

    - -

    Miller And Levine Biology Study Workbook A Answer Key Pdf is available for download at Archive, where you can also find other related books and materials. Whether you are a student or a teacher, Miller And Levine Biology Study Workbook A Answer Key Pdf can help you achieve your goals in biology.

    -

    - -

    One of the benefits of using Miller And Levine Biology Study Workbook A Answer Key Pdf is that it follows the same structure and organization as the textbook, making it easy to follow along and review the material. Each chapter of the workbook corresponds to a chapter of the textbook, and each section of the workbook corresponds to a section of the textbook. The workbook also uses the same color scheme and icons as the textbook, helping you identify the main ideas and key points.

    - -

    Another benefit of using Miller And Levine Biology Study Workbook A Answer Key Pdf is that it provides a variety of exercises and activities to suit different learning styles and preferences. You can choose from multiple-choice, short-answer, fill-in-the-blank, matching, true-false, and essay questions, as well as diagrams, graphs, tables, maps, and charts. You can also find cross-curricular connections, critical thinking questions, and science literacy skills in each chapter. The workbook also offers tips and strategies for studying biology effectively and efficiently.

    - -

    A third benefit of using Miller And Levine Biology Study Workbook A Answer Key Pdf is that it helps you prepare for standardized tests and college entrance exams. The workbook aligns with the Next Generation Science Standards (NGSS) and the Common Core State Standards (CCSS), ensuring that you are learning the most relevant and up-to-date content and skills in biology. The workbook also includes practice tests for each unit of the textbook, as well as a comprehensive final exam at the end of the workbook. The practice tests simulate the format and difficulty level of real exams, giving you an opportunity to assess your strengths and weaknesses and improve your test-taking skills.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/roberta/wsc/README.md b/spaces/gradio/HuBERT/examples/roberta/wsc/README.md deleted file mode 100644 index 21a045d999739836a17574593292e42131315ae9..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/roberta/wsc/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# Finetuning RoBERTa on Winograd Schema Challenge (WSC) data - -The following instructions can be used to finetune RoBERTa on the WSC training -data provided by [SuperGLUE](https://super.gluebenchmark.com/). - -Note that there is high variance in the results. For our GLUE/SuperGLUE -submission we swept over the learning rate (1e-5, 2e-5, 3e-5), batch size (16, -32, 64) and total number of updates (500, 1000, 2000, 3000), as well as the -random seed. Out of ~100 runs we chose the best 7 models and ensembled them. - -**Approach:** The instructions below use a slightly different loss function than -what's described in the original RoBERTa arXiv paper. In particular, -[Kocijan et al. (2019)](https://arxiv.org/abs/1905.06290) introduce a margin -ranking loss between `(query, candidate)` pairs with tunable hyperparameters -alpha and beta. This is supported in our code as well with the `--wsc-alpha` and -`--wsc-beta` arguments. However, we achieved slightly better (and more robust) -results on the development set by instead using a single cross entropy loss term -over the log-probabilities for the query and all mined candidates. **The -candidates are mined using spaCy from each input sentence in isolation, so the -approach remains strictly pointwise.** This reduces the number of -hyperparameters and our best model achieved 92.3% development set accuracy, -compared to ~90% accuracy for the margin loss. Later versions of the RoBERTa -arXiv paper will describe this updated formulation. - -### 1) Download the WSC data from the SuperGLUE website: -```bash -wget https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip -unzip WSC.zip - -# we also need to copy the RoBERTa dictionary into the same directory -wget -O WSC/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt -``` - -### 2) Finetune over the provided training data: -```bash -TOTAL_NUM_UPDATES=2000 # Total number of training steps. -WARMUP_UPDATES=250 # Linearly increase LR over this many steps. -LR=2e-05 # Peak LR for polynomial LR scheduler. -MAX_SENTENCES=16 # Batch size per GPU. -SEED=1 # Random seed. 
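# (Illustrative note, not in the original README.) These variables feed the fairseq-train
# call below; per the text above, the authors' sweep varied the learning rate, batch size,
# total number of updates, and this SEED across runs before ensembling the best models.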
-ROBERTA_PATH=/path/to/roberta/model.pt - -# we use the --user-dir option to load the task and criterion -# from the examples/roberta/wsc directory: -FAIRSEQ_PATH=/path/to/fairseq -FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/wsc - -CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train WSC/ \ - --restore-file $ROBERTA_PATH \ - --reset-optimizer --reset-dataloader --reset-meters \ - --no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --valid-subset val \ - --fp16 --ddp-backend legacy_ddp \ - --user-dir $FAIRSEQ_USER_DIR \ - --task wsc --criterion wsc --wsc-cross-entropy \ - --arch roberta_large --bpe gpt2 --max-positions 512 \ - --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \ - --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \ - --lr-scheduler polynomial_decay --lr $LR \ - --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_NUM_UPDATES \ - --batch-size $MAX_SENTENCES \ - --max-update $TOTAL_NUM_UPDATES \ - --log-format simple --log-interval 100 \ - --seed $SEED -``` - -The above command assumes training on 4 GPUs, but you can achieve the same -results on a single GPU by adding `--update-freq=4`. - -### 3) Evaluate -```python -from fairseq.models.roberta import RobertaModel -from examples.roberta.wsc import wsc_utils # also loads WSC task and criterion -roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'WSC/') -roberta.cuda() -nsamples, ncorrect = 0, 0 -for sentence, label in wsc_utils.jsonl_iterator('WSC/val.jsonl', eval=True): - pred = roberta.disambiguate_pronoun(sentence) - nsamples += 1 - if pred == label: - ncorrect += 1 -print('Accuracy: ' + str(ncorrect / float(nsamples))) -# Accuracy: 0.9230769230769231 -``` - -## RoBERTa training on WinoGrande dataset -We have also provided `winogrande` task and criterion for finetuning on the -[WinoGrande](https://mosaic.allenai.org/projects/winogrande) like datasets -where there are always two candidates and one is correct. -It's more efficient implementation for such subcases. - -```bash -TOTAL_NUM_UPDATES=23750 # Total number of training steps. -WARMUP_UPDATES=2375 # Linearly increase LR over this many steps. -LR=1e-05 # Peak LR for polynomial LR scheduler. -MAX_SENTENCES=32 # Batch size per GPU. -SEED=1 # Random seed. 
-ROBERTA_PATH=/path/to/roberta/model.pt - -# we use the --user-dir option to load the task and criterion -# from the examples/roberta/wsc directory: -FAIRSEQ_PATH=/path/to/fairseq -FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/wsc - -cd fairseq -CUDA_VISIBLE_DEVICES=0 fairseq-train winogrande_1.0/ \ - --restore-file $ROBERTA_PATH \ - --reset-optimizer --reset-dataloader --reset-meters \ - --no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --valid-subset val \ - --fp16 --ddp-backend legacy_ddp \ - --user-dir $FAIRSEQ_USER_DIR \ - --task winogrande --criterion winogrande \ - --wsc-margin-alpha 5.0 --wsc-margin-beta 0.4 \ - --arch roberta_large --bpe gpt2 --max-positions 512 \ - --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \ - --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \ - --lr-scheduler polynomial_decay --lr $LR \ - --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_NUM_UPDATES \ - --batch-size $MAX_SENTENCES \ - --max-update $TOTAL_NUM_UPDATES \ - --log-format simple --log-interval 100 -``` diff --git a/spaces/gradio/HuBERT/examples/translation/README.md b/spaces/gradio/HuBERT/examples/translation/README.md deleted file mode 100644 index 2941f5eb8482dab61dca5eca27a71abd7ee5bf5c..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/translation/README.md +++ /dev/null @@ -1,301 +0,0 @@ -# Neural Machine Translation - -This README contains instructions for [using pretrained translation models](#example-usage-torchhub) -as well as [training new models](#training-a-new-model). - -## Pre-trained models - -Model | Description | Dataset | Download ----|---|---|--- -`conv.wmt14.en-fr` | Convolutional
([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2) newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2) newstest2012/2013: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.ntst1213.tar.bz2)
-`conv.wmt14.en-de` | Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) | model: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2) newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-de.newstest2014.tar.bz2)
-`conv.wmt17.en-de` | Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT17 English-German](http://statmt.org/wmt17/translation-task.html#Download) | model: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2) newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.v2.en-de.newstest2014.tar.bz2)
-`transformer.wmt14.en-fr` | Transformer ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2) newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
-`transformer.wmt16.en-de` | Transformer ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2) newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
-`transformer.wmt18.en-de` | Transformer ([Edunov et al., 2018](https://arxiv.org/abs/1808.09381)) WMT'18 winner | [WMT'18 English-German](http://www.statmt.org/wmt18/translation-task.html) | model: [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz) See NOTE in the archive
-`transformer.wmt19.en-de` | Transformer ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) WMT'19 winner | [WMT'19 English-German](http://www.statmt.org/wmt19/translation-task.html) | model: [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz)
-`transformer.wmt19.de-en` | Transformer ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) WMT'19 winner | [WMT'19 German-English](http://www.statmt.org/wmt19/translation-task.html) | model: [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz)
-`transformer.wmt19.en-ru` | Transformer ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) WMT'19 winner | [WMT'19 English-Russian](http://www.statmt.org/wmt19/translation-task.html) | model: [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz)
-`transformer.wmt19.ru-en` | Transformer ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) WMT'19 winner | [WMT'19 Russian-English](http://www.statmt.org/wmt19/translation-task.html) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz) - -## Example usage (torch.hub) - -We require a few additional Python dependencies for preprocessing: -```bash -pip install fastBPE sacremoses subword_nmt -``` - -Interactive translation via PyTorch Hub: -```python -import torch - -# List available models -torch.hub.list('pytorch/fairseq') # [..., 'transformer.wmt16.en-de', ... ] - -# Load a transformer trained on WMT'16 En-De -# Note: WMT'19 models use fastBPE instead of subword_nmt, see instructions below -en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de', - tokenizer='moses', bpe='subword_nmt') -en2de.eval() # disable dropout - -# The underlying model is available under the *models* attribute -assert isinstance(en2de.models[0], fairseq.models.transformer.TransformerModel) - -# Move model to GPU for faster translation -en2de.cuda() - -# Translate a sentence -en2de.translate('Hello world!') -# 'Hallo Welt!' - -# Batched translation -en2de.translate(['Hello world!', 'The cat sat on the mat.']) -# ['Hallo Welt!', 'Die Katze saß auf der Matte.'] -``` - -Loading custom models: -```python -from fairseq.models.transformer import TransformerModel -zh2en = TransformerModel.from_pretrained( - '/path/to/checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='data-bin/wmt17_zh_en_full', - bpe='subword_nmt', - bpe_codes='data-bin/wmt17_zh_en_full/zh.code' -) -zh2en.translate('你好 世界') -# 'Hello World' -``` - -If you are using a `transformer.wmt19` models, you will need to set the `bpe` -argument to `'fastbpe'` and (optionally) load the 4-model ensemble: -```python -en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de', - checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', - tokenizer='moses', bpe='fastbpe') -en2de.eval() # disable dropout -``` - -## Example usage (CLI tools) - -Generation with the binarized test sets can be run in batch mode as follows, e.g. for WMT 2014 English-French on a GTX-1080ti: -```bash -mkdir -p data-bin -curl https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2 | tar xvjf - -C data-bin -curl https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2 | tar xvjf - -C data-bin -fairseq-generate data-bin/wmt14.en-fr.newstest2014 \ - --path data-bin/wmt14.en-fr.fconv-py/model.pt \ - --beam 5 --batch-size 128 --remove-bpe | tee /tmp/gen.out -# ... -# | Translated 3003 sentences (96311 tokens) in 166.0s (580.04 tokens/s) -# | Generate test with beam=5: BLEU4 = 40.83, 67.5/46.9/34.4/25.5 (BP=1.000, ratio=1.006, syslen=83262, reflen=82787) - -# Compute BLEU score -grep ^H /tmp/gen.out | cut -f3- > /tmp/gen.out.sys -grep ^T /tmp/gen.out | cut -f2- > /tmp/gen.out.ref -fairseq-score --sys /tmp/gen.out.sys --ref /tmp/gen.out.ref -# BLEU4 = 40.83, 67.5/46.9/34.4/25.5 (BP=1.000, ratio=1.006, syslen=83262, reflen=82787) -``` - -## Training a new model - -### IWSLT'14 German to English (Transformer) - -The following instructions can be used to train a Transformer model on the [IWSLT'14 German to English dataset](http://workshop2014.iwslt.org/downloads/proceeding.pdf). - -First download and preprocess the data: -```bash -# Download and prepare the data -cd examples/translation/ -bash prepare-iwslt14.sh -cd ../.. 
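# (Illustrative check, not in the original README; paths assume the default output
# directory of prepare-iwslt14.sh and match the TEXT variable used below.) A quick look
# at the BPE-tokenized splits before binarizing confirms the data is in place:
# head -n 1 examples/translation/iwslt14.tokenized.de-en/train.de examples/translation/iwslt14.tokenized.de-en/train.en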
- -# Preprocess/binarize the data -TEXT=examples/translation/iwslt14.tokenized.de-en -fairseq-preprocess --source-lang de --target-lang en \ - --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \ - --destdir data-bin/iwslt14.tokenized.de-en \ - --workers 20 -``` - -Next we'll train a Transformer translation model over this data: -```bash -CUDA_VISIBLE_DEVICES=0 fairseq-train \ - data-bin/iwslt14.tokenized.de-en \ - --arch transformer_iwslt_de_en --share-decoder-input-output-embed \ - --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ - --lr 5e-4 --lr-scheduler inverse_sqrt --warmup-updates 4000 \ - --dropout 0.3 --weight-decay 0.0001 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --max-tokens 4096 \ - --eval-bleu \ - --eval-bleu-args '{"beam": 5, "max_len_a": 1.2, "max_len_b": 10}' \ - --eval-bleu-detok moses \ - --eval-bleu-remove-bpe \ - --eval-bleu-print-samples \ - --best-checkpoint-metric bleu --maximize-best-checkpoint-metric -``` - -Finally we can evaluate our trained model: -```bash -fairseq-generate data-bin/iwslt14.tokenized.de-en \ - --path checkpoints/checkpoint_best.pt \ - --batch-size 128 --beam 5 --remove-bpe -``` - -### WMT'14 English to German (Convolutional) - -The following instructions can be used to train a Convolutional translation model on the WMT English to German dataset. -See the [Scaling NMT README](../scaling_nmt/README.md) for instructions to train a Transformer translation model on this data. - -The WMT English to German dataset can be preprocessed using the `prepare-wmt14en2de.sh` script. -By default it will produce a dataset that was modeled after [Attention Is All You Need (Vaswani et al., 2017)](https://arxiv.org/abs/1706.03762), but with additional news-commentary-v12 data from WMT'17. - -To use only data available in WMT'14 or to replicate results obtained in the original [Convolutional Sequence to Sequence Learning (Gehring et al., 2017)](https://arxiv.org/abs/1705.03122) paper, please use the `--icml17` option. - -```bash -# Download and prepare the data -cd examples/translation/ -# WMT'17 data: -bash prepare-wmt14en2de.sh -# or to use WMT'14 data: -# bash prepare-wmt14en2de.sh --icml17 -cd ../.. - -# Binarize the dataset -TEXT=examples/translation/wmt17_en_de -fairseq-preprocess \ - --source-lang en --target-lang de \ - --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \ - --destdir data-bin/wmt17_en_de --thresholdtgt 0 --thresholdsrc 0 \ - --workers 20 - -# Train the model -mkdir -p checkpoints/fconv_wmt_en_de -fairseq-train \ - data-bin/wmt17_en_de \ - --arch fconv_wmt_en_de \ - --dropout 0.2 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --optimizer nag --clip-norm 0.1 \ - --lr 0.5 --lr-scheduler fixed --force-anneal 50 \ - --max-tokens 4000 \ - --save-dir checkpoints/fconv_wmt_en_de - -# Evaluate -fairseq-generate data-bin/wmt17_en_de \ - --path checkpoints/fconv_wmt_en_de/checkpoint_best.pt \ - --beam 5 --remove-bpe -``` - -### WMT'14 English to French -```bash -# Download and prepare the data -cd examples/translation/ -bash prepare-wmt14en2fr.sh -cd ../.. 
- -# Binarize the dataset -TEXT=examples/translation/wmt14_en_fr -fairseq-preprocess \ - --source-lang en --target-lang fr \ - --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \ - --destdir data-bin/wmt14_en_fr --thresholdtgt 0 --thresholdsrc 0 \ - --workers 60 - -# Train the model -mkdir -p checkpoints/fconv_wmt_en_fr -fairseq-train \ - data-bin/wmt14_en_fr \ - --arch fconv_wmt_en_fr \ - --dropout 0.1 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --optimizer nag --clip-norm 0.1 \ - --lr 0.5 --lr-scheduler fixed --force-anneal 50 \ - --max-tokens 3000 \ - --save-dir checkpoints/fconv_wmt_en_fr - -# Evaluate -fairseq-generate \ - data-bin/fconv_wmt_en_fr \ - --path checkpoints/fconv_wmt_en_fr/checkpoint_best.pt \ - --beam 5 --remove-bpe -``` - -## Multilingual Translation - -We also support training multilingual translation models. In this example we'll -train a multilingual `{de,fr}-en` translation model using the IWSLT'17 datasets. - -Note that we use slightly different preprocessing here than for the IWSLT'14 -En-De data above. In particular we learn a joint BPE code for all three -languages and use fairseq-interactive and sacrebleu for scoring the test set. - -```bash -# First install sacrebleu and sentencepiece -pip install sacrebleu sentencepiece - -# Then download and preprocess the data -cd examples/translation/ -bash prepare-iwslt17-multilingual.sh -cd ../.. - -# Binarize the de-en dataset -TEXT=examples/translation/iwslt17.de_fr.en.bpe16k -fairseq-preprocess --source-lang de --target-lang en \ - --trainpref $TEXT/train.bpe.de-en \ - --validpref $TEXT/valid0.bpe.de-en,$TEXT/valid1.bpe.de-en,$TEXT/valid2.bpe.de-en,$TEXT/valid3.bpe.de-en,$TEXT/valid4.bpe.de-en,$TEXT/valid5.bpe.de-en \ - --destdir data-bin/iwslt17.de_fr.en.bpe16k \ - --workers 10 - -# Binarize the fr-en dataset -# NOTE: it's important to reuse the en dictionary from the previous step -fairseq-preprocess --source-lang fr --target-lang en \ - --trainpref $TEXT/train.bpe.fr-en \ - --validpref $TEXT/valid0.bpe.fr-en,$TEXT/valid1.bpe.fr-en,$TEXT/valid2.bpe.fr-en,$TEXT/valid3.bpe.fr-en,$TEXT/valid4.bpe.fr-en,$TEXT/valid5.bpe.fr-en \ - --tgtdict data-bin/iwslt17.de_fr.en.bpe16k/dict.en.txt \ - --destdir data-bin/iwslt17.de_fr.en.bpe16k \ - --workers 10 - -# Train a multilingual transformer model -# NOTE: the command below assumes 1 GPU, but accumulates gradients from -# 8 fwd/bwd passes to simulate training on 8 GPUs -mkdir -p checkpoints/multilingual_transformer -CUDA_VISIBLE_DEVICES=0 fairseq-train data-bin/iwslt17.de_fr.en.bpe16k/ \ - --max-epoch 50 \ - --ddp-backend=legacy_ddp \ - --task multilingual_translation --lang-pairs de-en,fr-en \ - --arch multilingual_transformer_iwslt_de_en \ - --share-decoders --share-decoder-input-output-embed \ - --optimizer adam --adam-betas '(0.9, 0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --warmup-updates 4000 --warmup-init-lr '1e-07' \ - --label-smoothing 0.1 --criterion label_smoothed_cross_entropy \ - --dropout 0.3 --weight-decay 0.0001 \ - --save-dir checkpoints/multilingual_transformer \ - --max-tokens 4000 \ - --update-freq 8 - -# Generate and score the test set with sacrebleu -SRC=de -sacrebleu --test-set iwslt17 --language-pair ${SRC}-en --echo src \ - | python scripts/spm_encode.py --model examples/translation/iwslt17.de_fr.en.bpe16k/sentencepiece.bpe.model \ - > iwslt17.test.${SRC}-en.${SRC}.bpe -cat iwslt17.test.${SRC}-en.${SRC}.bpe \ - | fairseq-interactive data-bin/iwslt17.de_fr.en.bpe16k/ \ - --task 
multilingual_translation --lang-pairs de-en,fr-en \ - --source-lang ${SRC} --target-lang en \ - --path checkpoints/multilingual_transformer/checkpoint_best.pt \ - --buffer-size 2000 --batch-size 128 \ - --beam 5 --remove-bpe=sentencepiece \ - > iwslt17.test.${SRC}-en.en.sys -grep ^H iwslt17.test.${SRC}-en.en.sys | cut -f3 \ - | sacrebleu --test-set iwslt17 --language-pair ${SRC}-en -``` - -##### Argument format during inference - -During inference it is required to specify a single `--source-lang` and -`--target-lang`, which indicates the inference langauge direction. -`--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to -the same value as training. diff --git a/spaces/gradio/HuBERT/fairseq/data/legacy/masked_lm_dictionary.py b/spaces/gradio/HuBERT/fairseq/data/legacy/masked_lm_dictionary.py deleted file mode 100644 index dee88f7a3ed72ea465ea4e8ffe7b1c01ff6f57f1..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/data/legacy/masked_lm_dictionary.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.data import Dictionary - - -class MaskedLMDictionary(Dictionary): - """ - Dictionary for Masked Language Modelling tasks. This extends Dictionary by - adding the mask symbol. - """ - - def __init__( - self, - pad="", - eos="", - unk="", - mask="", - ): - super().__init__(pad=pad, eos=eos, unk=unk) - self.mask_word = mask - self.mask_index = self.add_symbol(mask) - self.nspecial = len(self.symbols) - - def mask(self): - """Helper to get index of mask symbol""" - return self.mask_index - - -class BertDictionary(MaskedLMDictionary): - """ - Dictionary for BERT task. This extends MaskedLMDictionary by adding support - for cls and sep symbols. - """ - - def __init__( - self, - pad="", - eos="", - unk="", - mask="", - cls="", - sep="", - ): - super().__init__(pad=pad, eos=eos, unk=unk, mask=mask) - self.cls_word = cls - self.sep_word = sep - self.cls_index = self.add_symbol(cls) - self.sep_index = self.add_symbol(sep) - self.nspecial = len(self.symbols) - - def cls(self): - """Helper to get index of cls symbol""" - return self.cls_index - - def sep(self): - """Helper to get index of sep symbol""" - return self.sep_index diff --git a/spaces/gradio/HuBERT/fairseq/modules/sparse_transformer_sentence_encoder_layer.py b/spaces/gradio/HuBERT/fairseq/modules/sparse_transformer_sentence_encoder_layer.py deleted file mode 100644 index d95da59c2471bfa858fd627605196d7f41f9ec12..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/modules/sparse_transformer_sentence_encoder_layer.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from fairseq.modules import TransformerSentenceEncoderLayer -from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention - - -class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer): - """ - Implements a Sprase Transformer Encoder Layer (see SparseMultiheadAttention) - """ - - def __init__( - self, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - activation_fn: str = "relu", - export: bool = False, - is_bidirectional: bool = True, - stride: int = 32, - expressivity: int = 8, - ) -> None: - - super().__init__( - embedding_dim, - ffn_embedding_dim, - num_attention_heads, - dropout, - attention_dropout, - activation_dropout, - activation_fn, - export, - ) - - self.self_attn = SparseMultiheadAttention( - self.embedding_dim, - num_attention_heads, - dropout=attention_dropout, - add_bias_kv=False, - add_zero_attn=False, - self_attention=True, - is_bidirectional=is_bidirectional, - stride=stride, - expressivity=expressivity, - ) diff --git a/spaces/gstaff/test_space/monsterMakerTemplate.html b/spaces/gstaff/test_space/monsterMakerTemplate.html deleted file mode 100644 index 6946da8b0ffb3a3f8951e335ea78116b37004828..0000000000000000000000000000000000000000 --- a/spaces/gstaff/test_space/monsterMakerTemplate.html +++ /dev/null @@ -1,158 +0,0 @@ - - - - - - - - Document - - - - - - - - -
    -
    -
    -
    -
    - Picture of {name} -
    -
    -
    -

    {name}

    -

    {monster_type}

    -
    -
    -
    -
      -
    • -

      - Armor Class - {armor_class} -

      -
    • -
    • -

      - Hit Points - {hit_points} -

      -
    • -
    • -

      - Speed - {speed} -

      -
    • -
    -
    -
    -
    - Str - {str_stat} -
    -
    - Dex - {dex_stat} -
    -
    - Con - {con_stat} -
    -
    - Int - {int_stat} -
    -
    - Wis - {wis_stat} -
    -
    - Cha - {cha_stat} -
    -
    -
    -
      -
    • -

      - Saving Throws - {saving_throws} -

      -
    • -
    • -

      - Skills - {skills} -

      -
    • -
    • -

      - Damage Vulnerabilities - {damage_vulnerabilities} -

      -
    • -
    • -

      - Damage Resistances - {damage_resistances} -

      -
    • -
    • -

      - Damage Immunities - {damage_immunities} -

      -
    • -
    • -

      - Condition Immunities - {condition_immunities} -

      -
    • -
    • -

      - Senses - {senses} -

      -
    • -
    • -

      - Languages - {languages} -

      -
    • -
    • -

      - Challenge - {challenge} -

      -
    • -
    -
    -
    Traits
    -
    - {passives} -
    Actions
    -
    - {actions} -
    Notes
    -
    -
    -

    {description}

    -
    - -
    - -
    -
    - - - \ No newline at end of file diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/__init__.py b/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/hank1996/yolopv2/utils/aws/resume.py b/spaces/hank1996/yolopv2/utils/aws/resume.py deleted file mode 100644 index bdbeca242726e87e46eb166bd2d9b5cfacedd090..0000000000000000000000000000000000000000 --- a/spaces/hank1996/yolopv2/utils/aws/resume.py +++ /dev/null @@ -1,36 +0,0 @@ - -import os -import sys -from pathlib import Path - -import torch -import yaml - -sys.path.append('./') # to run '$ python *.py' files in subdirectories - -port = 0 # --master_port -path = Path('').resolve() -for last in path.rglob('*/**/last.pt'): - ckpt = torch.load(last) - if ckpt['optimizer'] is None: - continue - - # Load opt.yaml - with open(last.parent.parent / 'opt.yaml') as f: - opt = yaml.load(f, Loader=yaml.SafeLoader) - - # Get device count - d = opt['device'].split(',') # devices - nd = len(d) # number of devices - ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel - - if ddp: # multi-GPU - port += 1 - cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' - else: # single-GPU - cmd = f'python train.py --resume {last}' - - cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread - print(cmd) - os.system(cmd) - diff --git a/spaces/harley001/anime-remove-background/app.py b/spaces/harley001/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/harley001/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] 
- examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/model_restore.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/model_restore.py deleted file mode 100644 index f396016be11dd267f9275e6ad622a903895f9e55..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/model_restore.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import nnunet -import torch -from batchgenerators.utilities.file_and_folder_operations import * -import importlib -import pkgutil -from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer - - -def recursive_find_python_class(folder, trainer_name, current_module): - tr = None - for importer, modname, ispkg in pkgutil.iter_modules(folder): - # print(modname, ispkg) - if not ispkg: - m = importlib.import_module(current_module + "." + modname) - if hasattr(m, trainer_name): - tr = getattr(m, trainer_name) - break - - if tr is None: - for importer, modname, ispkg in pkgutil.iter_modules(folder): - if ispkg: - next_current_module = current_module + "." + modname - tr = recursive_find_python_class([join(folder[0], modname)], trainer_name, current_module=next_current_module) - if tr is not None: - break - - return tr - - -def restore_model(pkl_file, checkpoint=None, train=False, fp16=None): - """ - This is a utility function to load any nnUNet trainer from a pkl. It will recursively search - nnunet.trainig.network_training for the file that contains the trainer and instantiate it with the arguments saved in the pkl file. If checkpoint - is specified, it will furthermore load the checkpoint file in train/test mode (as specified by train). - The pkl file required here is the one that will be saved automatically when calling nnUNetTrainer.save_checkpoint. - :param pkl_file: - :param checkpoint: - :param train: - :param fp16: if None then we take no action. If True/False we overwrite what the model has in its init - :return: - """ - info = load_pickle(pkl_file) - init = info['init'] - name = info['name'] - search_in = join(nnunet.__path__[0], "training", "network_training") - tr = recursive_find_python_class([search_in], name, current_module="nnunet.training.network_training") - - if tr is None: - """ - Fabian only. 
This will trigger searching for trainer classes in other repositories as well - """ - try: - import meddec - search_in = join(meddec.__path__[0], "model_training") - tr = recursive_find_python_class([search_in], name, current_module="meddec.model_training") - except ImportError: - pass - - if tr is None: - raise RuntimeError("Could not find the model trainer specified in checkpoint in nnunet.trainig.network_training. If it " - "is not located there, please move it or change the code of restore_model. Your model " - "trainer can be located in any directory within nnunet.trainig.network_training (search is recursive)." - "\nDebug info: \ncheckpoint file: %s\nName of trainer: %s " % (checkpoint, name)) - assert issubclass(tr, nnUNetTrainer), "The network trainer was found but is not a subclass of nnUNetTrainer. " \ - "Please make it so!" - - # this is now deprecated - """if len(init) == 7: - print("warning: this model seems to have been saved with a previous version of nnUNet. Attempting to load it " - "anyways. Expect the unexpected.") - print("manually editing init args...") - init = [init[i] for i in range(len(init)) if i != 2]""" - - # ToDo Fabian make saves use kwargs, please... - - trainer = tr(*init) - - # We can hack fp16 overwriting into the trainer without changing the init arguments because nothing happens with - # fp16 in the init, it just saves it to a member variable - if fp16 is not None: - trainer.fp16 = fp16 - - trainer.process_plans(info['plans']) - if checkpoint is not None: - trainer.load_checkpoint(checkpoint, train) - return trainer - - -def load_best_model_for_inference(folder): - checkpoint = join(folder, "model_best.model") - pkl_file = checkpoint + ".pkl" - return restore_model(pkl_file, checkpoint, False) - - -def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name="model_best"): - """ - used for if you need to ensemble the five models of a cross-validation. This will restore the model from the - checkpoint in fold 0, load all parameters of the five folds in ram and return both. This will allow for fast - switching between parameters (as opposed to loading them form disk each time). - - This is best used for inference and test prediction - :param folder: - :param folds: - :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init - :return: - """ - if isinstance(folds, str): - folds = [join(folder, "all")] - assert isdir(folds[0]), "no output folder for fold %s found" % folds - elif isinstance(folds, (list, tuple)): - if len(folds) == 1 and folds[0] == "all": - folds = [join(folder, "all")] - else: - folds = [join(folder, "fold_%d" % i) for i in folds] - assert all([isdir(i) for i in folds]), "list of folds specified but not all output folders are present" - elif isinstance(folds, int): - folds = [join(folder, "fold_%d" % folds)] - assert all([isdir(i) for i in folds]), "output folder missing for fold %d" % folds - elif folds is None: - print("folds is None so we will automatically look for output folders (not using \'all\'!)") - folds = subfolders(folder, prefix="fold") - print("found the following folds: ", folds) - else: - raise ValueError("Unknown value for folds. Type: %s. 
Expected: list of int, int, str or None", str(type(folds))) - - trainer = restore_model(join(folds[0], "%s.model.pkl" % checkpoint_name), fp16=mixed_precision) - trainer.output_folder = folder - trainer.output_folder_base = folder - trainer.update_fold(0) - trainer.initialize(False) - all_best_model_files = [join(i, "%s.model" % checkpoint_name) for i in folds] - print("using the following model files: ", all_best_model_files) - all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files] - return trainer, all_params - - -if __name__ == "__main__": - pkl = "/home/fabian/PhD/results/nnUNetV2/nnUNetV2_3D_fullres/Task004_Hippocampus/fold0/model_best.model.pkl" - checkpoint = pkl[:-4] - train = False - trainer = restore_model(pkl, checkpoint, train) diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_late_2.sh b/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_late_2.sh deleted file mode 100644 index 8dfbd5b35afacbc98d690fd85b83e5effe2b09f9..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_late_2.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 --gres=gpu:1 --time=24:00:00 -#SBATCH --job-name=Task503_glacier_mtl_late_2 - -export data_raw="/home/woody/iwi5/iwi5039h/data_raw" -export nnUNet_raw_data_base="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_raw_data_base/" -export nnUNet_preprocessed="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_preprocessed/" -export RESULTS_FOLDER="/home/woody/iwi5/iwi5039h/nnUNet_data/RESULTS_FOLDER" - -cd nnunet_glacer -pwd -conda activate nnunet - -python3 nnunet/dataset_conversion/Task503_Glacier_mtl.py -data_percentage 100 -base $data_raw -python3 nnunet/experiment_planning/nnUNet_plan_and_preprocess.py -t 503 -pl3d None -pl2d ExperimentPlanner2D_mtl - -python3 nnunet/run/run_training.py 2d nnUNetTrainerMTLlate 503 2 -p nnUNetPlans_mtl --disable_postprocessing_on_folds -python3 nnunet/inference/predict_simple.py -i $nnUNet_raw_data_base/nnUNet_raw_data/Task503_Glacier_mtl/imagesTs -o $RESULTS_FOLDER/test_predictions/Task503_Glacier_mtl_late/fold_2 -t 503 -m 2d -f 2 -p nnUNetPlans_mtl -tr nnUNetTrainerMTLlate -python3 nnunet/dataset_conversion/Task503_Glacier_mtl_reverse.py -i $RESULTS_FOLDER/test_predictions/Task503_Glacier_mtl_late/fold_2 -python3 ./evaluate_nnUNet.py --predictions $RESULTS_FOLDER/test_predictions/Task503_Glacier_mtl_late/fold_2/pngs --labels_fronts $data_raw/fronts/test --labels_zones $data_raw/zones/test --sar_images $data_raw/sar_images/test diff --git a/spaces/hololee/dreambooth-training/convertosd.py b/spaces/hololee/dreambooth-training/convertosd.py deleted file mode 100644 index 1211d34edf018b7c402a765c5a7ecdb684cc28e3..0000000000000000000000000000000000000000 --- a/spaces/hololee/dreambooth-training/convertosd.py +++ /dev/null @@ -1,302 +0,0 @@ -# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint. -# *Only* converts the UNet, VAE, and Text Encoder. -# Does not convert optimizer state or any other thing. 
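# (Editor's illustrative note, not part of the original script.) The conversion below is
# essentially a key-renaming pass over the three state dicts: Diffusers-style parameter
# names are mapped back to the original Stable Diffusion layout, e.g. the UNet key
# "time_embedding.linear_1.weight" becomes "time_embed.0.weight" via the tables that follow.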
- -import argparse -import os.path as osp -import re - -import torch -import gc - -# =================# -# UNet Conversion # -# =================# - -unet_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("time_embed.0.weight", "time_embedding.linear_1.weight"), - ("time_embed.0.bias", "time_embedding.linear_1.bias"), - ("time_embed.2.weight", "time_embedding.linear_2.weight"), - ("time_embed.2.bias", "time_embedding.linear_2.bias"), - ("input_blocks.0.0.weight", "conv_in.weight"), - ("input_blocks.0.0.bias", "conv_in.bias"), - ("out.0.weight", "conv_norm_out.weight"), - ("out.0.bias", "conv_norm_out.bias"), - ("out.2.weight", "conv_out.weight"), - ("out.2.bias", "conv_out.bias"), -] - -unet_conversion_map_resnet = [ - # (stable-diffusion, HF Diffusers) - ("in_layers.0", "norm1"), - ("in_layers.2", "conv1"), - ("out_layers.0", "norm2"), - ("out_layers.3", "conv2"), - ("emb_layers.1", "time_emb_proj"), - ("skip_connection", "conv_shortcut"), -] - -unet_conversion_map_layer = [] -# hardcoded number of downblocks and resnets/attentions... -# would need smarter logic for other networks. -for i in range(4): - # loop over downblocks/upblocks - - for j in range(2): - # loop over resnets/attentions for downblocks - hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." - sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0." - unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) - - if i < 3: - # no attention layers in down_blocks.3 - hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." - sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1." - unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) - - for j in range(3): - # loop over resnets/attentions for upblocks - hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." - sd_up_res_prefix = f"output_blocks.{3*i + j}.0." - unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) - - if i > 0: - # no attention layers in up_blocks.0 - hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." - sd_up_atn_prefix = f"output_blocks.{3*i + j}.1." - unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) - - if i < 3: - # no downsample in down_blocks.3 - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." - sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op." - unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) - - # no upsample in up_blocks.3 - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." - unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) - -hf_mid_atn_prefix = "mid_block.attentions.0." -sd_mid_atn_prefix = "middle_block.1." -unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) - -for j in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{j}." - sd_mid_res_prefix = f"middle_block.{2*j}." - unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -def convert_unet_state_dict(unet_state_dict): - # buyer beware: this is a *brittle* function, - # and correct output requires that all of these pieces interact in - # the exact order in which I have arranged them. 
- mapping = {k: k for k in unet_state_dict.keys()} - for sd_name, hf_name in unet_conversion_map: - mapping[hf_name] = sd_name - for k, v in mapping.items(): - if "resnets" in k: - for sd_part, hf_part in unet_conversion_map_resnet: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - for sd_part, hf_part in unet_conversion_map_layer: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} - return new_state_dict - - -# ================# -# VAE Conversion # -# ================# - -vae_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("nin_shortcut", "conv_shortcut"), - ("norm_out", "conv_norm_out"), - ("mid.attn_1.", "mid_block.attentions.0."), -] - -for i in range(4): - # down_blocks have two resnets - for j in range(2): - hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." - sd_down_prefix = f"encoder.down.{i}.block.{j}." - vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) - - if i < 3: - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." - sd_downsample_prefix = f"down.{i}.downsample." - vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) - - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"up.{3-i}.upsample." - vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) - - # up_blocks have three resnets - # also, up blocks in hf are numbered in reverse from sd - for j in range(3): - hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." - sd_up_prefix = f"decoder.up.{3-i}.block.{j}." - vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) - -# this part accounts for mid blocks in both the encoder and the decoder -for i in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{i}." - sd_mid_res_prefix = f"mid.block_{i+1}." 
- vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -vae_conversion_map_attn = [ - # (stable-diffusion, HF Diffusers) - ("norm.", "group_norm."), - ("q.", "query."), - ("k.", "key."), - ("v.", "value."), - ("proj_out.", "proj_attn."), -] - - -def reshape_weight_for_sd(w): - # convert HF linear weights to SD conv2d weights - return w.reshape(*w.shape, 1, 1) - - -def convert_vae_state_dict(vae_state_dict): - mapping = {k: k for k in vae_state_dict.keys()} - for k, v in mapping.items(): - for sd_part, hf_part in vae_conversion_map: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - if "attentions" in k: - for sd_part, hf_part in vae_conversion_map_attn: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} - weights_to_convert = ["q", "k", "v", "proj_out"] - print("Converting to CKPT ...") - for k, v in new_state_dict.items(): - for weight_name in weights_to_convert: - if f"mid.attn_1.{weight_name}.weight" in k: - print(f"Reshaping {k} for SD format") - new_state_dict[k] = reshape_weight_for_sd(v) - return new_state_dict - - -# =========================# -# Text Encoder Conversion # -# =========================# - - -textenc_conversion_lst = [ - # (stable-diffusion, HF Diffusers) - ("resblocks.", "text_model.encoder.layers."), - ("ln_1", "layer_norm1"), - ("ln_2", "layer_norm2"), - (".c_fc.", ".fc1."), - (".c_proj.", ".fc2."), - (".attn", ".self_attn"), - ("ln_final.", "transformer.text_model.final_layer_norm."), - ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), - ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), -] -protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} -textenc_pattern = re.compile("|".join(protected.keys())) - -# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp -code2idx = {"q": 0, "k": 1, "v": 2} - - -def convert_text_enc_state_dict_v20(text_enc_dict): - new_state_dict = {} - capture_qkv_weight = {} - capture_qkv_bias = {} - for k, v in text_enc_dict.items(): - if ( - k.endswith(".self_attn.q_proj.weight") - or k.endswith(".self_attn.k_proj.weight") - or k.endswith(".self_attn.v_proj.weight") - ): - k_pre = k[: -len(".q_proj.weight")] - k_code = k[-len("q_proj.weight")] - if k_pre not in capture_qkv_weight: - capture_qkv_weight[k_pre] = [None, None, None] - capture_qkv_weight[k_pre][code2idx[k_code]] = v - continue - - if ( - k.endswith(".self_attn.q_proj.bias") - or k.endswith(".self_attn.k_proj.bias") - or k.endswith(".self_attn.v_proj.bias") - ): - k_pre = k[: -len(".q_proj.bias")] - k_code = k[-len("q_proj.bias")] - if k_pre not in capture_qkv_bias: - capture_qkv_bias[k_pre] = [None, None, None] - capture_qkv_bias[k_pre][code2idx[k_code]] = v - continue - - relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k) - new_state_dict[relabelled_key] = v - - for k_pre, tensors in capture_qkv_weight.items(): - if None in tensors: - raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") - relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre) - new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors) - - for k_pre, tensors in capture_qkv_bias.items(): - if None in tensors: - raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") - relabelled_key = textenc_pattern.sub(lambda m: 
protected[re.escape(m.group(0))], k_pre) - new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors) - - return new_state_dict - - -def convert_text_enc_state_dict(text_enc_dict): - return text_enc_dict - - -def convert(model_path, checkpoint_path): - unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin") - vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin") - text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin") - - # Convert the UNet model - unet_state_dict = torch.load(unet_path, map_location="cpu") - unet_state_dict = convert_unet_state_dict(unet_state_dict) - unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} - - # Convert the VAE model - vae_state_dict = torch.load(vae_path, map_location="cpu") - vae_state_dict = convert_vae_state_dict(vae_state_dict) - vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} - - # Convert the text encoder model - text_enc_dict = torch.load(text_enc_path, map_location="cpu") - - # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper - is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict - - if is_v20_model: - # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm - text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()} - text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict) - text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} - else: - text_enc_dict = convert_text_enc_state_dict(text_enc_dict) - text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} - - # Put together new checkpoint - state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict} - state_dict = {k: v.half() for k, v in state_dict.items()} - state_dict = {"state_dict": state_dict} - torch.save(state_dict, checkpoint_path) - del state_dict, text_enc_dict, vae_state_dict, unet_state_dict - torch.cuda.empty_cache() - gc.collect() - \ No newline at end of file diff --git a/spaces/hoppiece/yans_2023_trans4mer/README.md b/spaces/hoppiece/yans_2023_trans4mer/README.md deleted file mode 100644 index 9d72c0b343d92c5bec8f8482420e9b122240625f..0000000000000000000000000000000000000000 --- a/spaces/hoppiece/yans_2023_trans4mer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Yans 2023 Trans4mer -emoji: 🦀 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hs1l/Date/run.py b/spaces/hs1l/Date/run.py deleted file mode 100644 index d373747ec8f722c299f9128287f1fd384ea2eb4d..0000000000000000000000000000000000000000 --- a/spaces/hs1l/Date/run.py +++ /dev/null @@ -1,9 +0,0 @@ -from datetime import datetime -from os import system -from time import sleep - -while True: - time = datetime.now() - print(time.strftime('■ TiME: '+"[ %H : %M : %S ] ")) - sleep(1) - system("clear") \ No newline at end of file diff --git a/spaces/hugginglearners/Ethiopian-Food-Classifier/README.md b/spaces/hugginglearners/Ethiopian-Food-Classifier/README.md deleted file mode 100644 index 9aef5f9333d7beff3aadaa34b914daf4afb9aa66..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/Ethiopian-Food-Classifier/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ethiopian Food Classifier 
-emoji: 👁 -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hylee/apdrawing/README.md b/spaces/hylee/apdrawing/README.md deleted file mode 100644 index 6ff1b12c51f12b593b6f3cbe61084687bf00bebd..0000000000000000000000000000000000000000 --- a/spaces/hylee/apdrawing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -python_version: 3.7 -title: Apdrawing -emoji: 💻 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/options/test_options.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/options/test_options.py deleted file mode 100644 index f4aca5ef369ddd427dd87f31af03f31256d46176..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/options/test_options.py +++ /dev/null @@ -1,22 +0,0 @@ -"""This script contains the test options for Deep3DFaceRecon_pytorch -""" -from .base_options import BaseOptions - - -class TestOptions(BaseOptions): - """This class includes test options. - - It also includes shared options defined in BaseOptions. - """ - - def initialize(self, parser): - parser = BaseOptions.initialize(self, parser) # define shared options - parser.add_argument("--phase", type=str, default="test", help="train, val, test, etc") - parser.add_argument( - "--dataset_mode", type=str, default=None, help="chooses how datasets are loaded. [None | flist]" - ) - parser.add_argument("--img_folder", type=str, default="examples", help="folder for test images.") - - # Dropout and Batchnorm has different behavior during training and test. 
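# In eval mode (which inference code is expected to use), dropout is disabled
# and BatchNorm falls back to its stored running statistics instead of batch
# statistics; setting isTrain = False below marks these options as test-time
# options accordingly.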
- self.isTrain = False - return parser diff --git a/spaces/iamironman4279/SadTalker/src/utils/preprocess.py b/spaces/iamironman4279/SadTalker/src/utils/preprocess.py deleted file mode 100644 index 0f784e6c3d8562e1db1bbd850b9f01843cee3c97..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/utils/preprocess.py +++ /dev/null @@ -1,170 +0,0 @@ -import numpy as np -import cv2, os, sys, torch -from tqdm import tqdm -from PIL import Image - -# 3dmm extraction -import safetensors -import safetensors.torch -from src.face3d.util.preprocess import align_img -from src.face3d.util.load_mats import load_lm3d -from src.face3d.models import networks - -from scipy.io import loadmat, savemat -from src.utils.croper import Preprocesser - - -import warnings - -from src.utils.safetensor_helper import load_x_from_safetensor -warnings.filterwarnings("ignore") - -def split_coeff(coeffs): - """ - Return: - coeffs_dict -- a dict of torch.tensors - - Parameters: - coeffs -- torch.tensor, size (B, 256) - """ - id_coeffs = coeffs[:, :80] - exp_coeffs = coeffs[:, 80: 144] - tex_coeffs = coeffs[:, 144: 224] - angles = coeffs[:, 224: 227] - gammas = coeffs[:, 227: 254] - translations = coeffs[:, 254:] - return { - 'id': id_coeffs, - 'exp': exp_coeffs, - 'tex': tex_coeffs, - 'angle': angles, - 'gamma': gammas, - 'trans': translations - } - - -class CropAndExtract(): - def __init__(self, sadtalker_path, device): - - self.propress = Preprocesser(device) - self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device) - - if sadtalker_path['use_safetensor']: - checkpoint = safetensors.torch.load_file(sadtalker_path['checkpoint']) - self.net_recon.load_state_dict(load_x_from_safetensor(checkpoint, 'face_3drecon')) - else: - checkpoint = torch.load(sadtalker_path['path_of_net_recon_model'], map_location=torch.device(device)) - self.net_recon.load_state_dict(checkpoint['net_recon']) - - self.net_recon.eval() - self.lm3d_std = load_lm3d(sadtalker_path['dir_of_BFM_fitting']) - self.device = device - - def generate(self, input_path, save_dir, crop_or_resize='crop', source_image_flag=False, pic_size=256): - - pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] - - landmarks_path = os.path.join(save_dir, pic_name+'_landmarks.txt') - coeff_path = os.path.join(save_dir, pic_name+'.mat') - png_path = os.path.join(save_dir, pic_name+'.png') - - #load input - if not os.path.isfile(input_path): - raise ValueError('input_path must be a valid path to video/image file') - elif input_path.split('.')[-1] in ['jpg', 'png', 'jpeg']: - # loader for first frame - full_frames = [cv2.imread(input_path)] - fps = 25 - else: - # loader for videos - video_stream = cv2.VideoCapture(input_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - full_frames.append(frame) - if source_image_flag: - break - - x_full_frames= [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames] - - #### crop images as the - if 'crop' in crop_or_resize.lower(): # default crop - x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512) - clx, cly, crx, cry = crop - lx, ly, rx, ry = quad - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad) - elif 'full' in crop_or_resize.lower(): - 
x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512) - clx, cly, crx, cry = crop - lx, ly, rx, ry = quad - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad) - else: # resize mode - oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1] - crop_info = ((ox2 - ox1, oy2 - oy1), None, None) - - frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames] - if len(frames_pil) == 0: - print('No face is detected in the input file') - return None, None - - # save crop info - for frame in frames_pil: - cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)) - - # 2. get the landmark according to the detected face. - if not os.path.isfile(landmarks_path): - lm = self.propress.predictor.extract_keypoint(frames_pil, landmarks_path) - else: - print(' Using saved landmarks.') - lm = np.loadtxt(landmarks_path).astype(np.float32) - lm = lm.reshape([len(x_full_frames), -1, 2]) - - if not os.path.isfile(coeff_path): - # load 3dmm paramter generator from Deep3DFaceRecon_pytorch - video_coeffs, full_coeffs = [], [] - for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'): - frame = frames_pil[idx] - W,H = frame.size - lm1 = lm[idx].reshape([-1, 2]) - - if np.mean(lm1) == -1: - lm1 = (self.lm3d_std[:, :2]+1)/2. - lm1 = np.concatenate( - [lm1[:, :1]*W, lm1[:, 1:2]*H], 1 - ) - else: - lm1[:, -1] = H - 1 - lm1[:, -1] - - trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std) - - trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32) - im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0) - - with torch.no_grad(): - full_coeff = self.net_recon(im_t) - coeffs = split_coeff(full_coeff) - - pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs} - - pred_coeff = np.concatenate([ - pred_coeff['exp'], - pred_coeff['angle'], - pred_coeff['trans'], - trans_params[2:][None], - ], 1) - video_coeffs.append(pred_coeff) - full_coeffs.append(full_coeff.cpu().numpy()) - - semantic_npy = np.array(video_coeffs)[:,0] - - savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]}) - - return coeff_path, png_path, crop_info diff --git a/spaces/ifey/chatdemo/MyWidget/__init__.py b/spaces/ifey/chatdemo/MyWidget/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/imseldrith/BotX/Uploader/functions/help_Nekmo_ffmpeg.py b/spaces/imseldrith/BotX/Uploader/functions/help_Nekmo_ffmpeg.py deleted file mode 100644 index e66e1e5a7fb31f287c7f70f84ee808ac473858ab..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/BotX/Uploader/functions/help_Nekmo_ffmpeg.py +++ /dev/null @@ -1,174 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Hash Minner - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this 
permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE - -import os -import asyncio -from hachoir.parser import createParser -from hachoir.metadata import extractMetadata -import time -import logging -logging.basicConfig(level=logging.DEBUG, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') -logger = logging.getLogger(__name__) - - -async def place_water_mark(input_file, output_file, water_mark_file): - watermarked_file = f"{output_file}.watermark.png" - metadata = extractMetadata(createParser(input_file)) - width = metadata.get("width") - # https://stackoverflow.com/a/34547184/4723940 - shrink_watermark_file_genertor_command = [ - "ffmpeg", - "-i", - water_mark_file, - "-y -v quiet", - "-vf", - f"scale={width}*0.5:-1", - watermarked_file, - ] - - # print(shrink_watermark_file_genertor_command) - process = await asyncio.create_subprocess_exec( - *shrink_watermark_file_genertor_command, - # stdout must a pipe to be accessible as process.stdout - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - # Wait for the subprocess to finish - stdout, stderr = await process.communicate() - e_response = stderr.decode().strip() - t_response = stdout.decode().strip() - commands_to_execute = [ - "ffmpeg", - "-i", input_file, - "-i", watermarked_file, - "-filter_complex", - # https://stackoverflow.com/a/16235519 - # "\"[0:0] scale=400:225 [wm]; [wm][1:0] overlay=305:0 [out]\"", - # "-map \"[out]\" -b:v 896k -r 20 -an ", - "\"overlay=(main_w-overlay_w):(main_h-overlay_h)\"", - # "-vf \"drawtext=text='@FFMovingPictureExpertGroupBOT':x=W-(W/2):y=H-(H/2):fontfile=" + Config.FONT_FILE + ":fontsize=12:fontcolor=white:shadowcolor=black:shadowx=5:shadowy=5\"", - output_file - ] - # print(commands_to_execute) - process = await asyncio.create_subprocess_exec( - *commands_to_execute, - # stdout must a pipe to be accessible as process.stdout - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - # Wait for the subprocess to finish - stdout, stderr = await process.communicate() - e_response = stderr.decode().strip() - t_response = stdout.decode().strip() - return output_file - - -async def take_screen_shot(video_file, output_directory, ttl): - # https://stackoverflow.com/a/13891070/4723940 - out_put_file_name = output_directory + \ - "/" + str(time.time()) + ".jpg" - file_genertor_command = [ - "ffmpeg", - "-ss", - str(ttl), - "-i", - video_file, - "-vframes", - "1", - out_put_file_name - ] - # width = "90" - process = await asyncio.create_subprocess_exec( - *file_genertor_command, - # stdout must a pipe to be accessible as process.stdout - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - # Wait for the subprocess to finish - stdout, stderr = await process.communicate() - e_response = stderr.decode().strip() - t_response = stdout.decode().strip() - return out_put_file_name if os.path.lexists(out_put_file_name) else None - -# https://github.com/Nekmo/telegram-upload/blob/master/telegram_upload/video.py#L26 - - -async def 
cult_small_video(video_file, output_directory, start_time, end_time): - # https://stackoverflow.com/a/13891070/4723940 - out_put_file_name = output_directory + \ - "/" + str(round(time.time())) + ".mp4" - file_genertor_command = [ - "ffmpeg", - "-i", - video_file, - "-ss", - start_time, - "-to", - end_time, - "-async", - "1", - "-strict", - "-2", - out_put_file_name - ] - process = await asyncio.create_subprocess_exec( - *file_genertor_command, - # stdout must a pipe to be accessible as process.stdout - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - # Wait for the subprocess to finish - stdout, stderr = await process.communicate() - e_response = stderr.decode().strip() - t_response = stdout.decode().strip() - return out_put_file_name if os.path.lexists(out_put_file_name) else None - - -async def generate_screen_shots( - video_file, - output_directory, - is_watermarkable, - wf, - min_duration, - no_of_photos -): - metadata = extractMetadata(createParser(video_file)) - duration = 0 - if metadata is not None and metadata.has("duration"): - duration = metadata.get('duration').seconds - if duration > min_duration: - images = [] - ttl_step = duration // no_of_photos - current_ttl = ttl_step - for _ in range(no_of_photos): - ss_img = await take_screen_shot(video_file, output_directory, current_ttl) - current_ttl = current_ttl + ttl_step - if is_watermarkable: - ss_img = await place_water_mark( - ss_img, f"{output_directory}/{str(time.time())}.jpg", wf - ) - - images.append(ss_img) - return images - else: - return None diff --git a/spaces/innnky/nyaru-svc2.0-advanced/models.py b/spaces/innnky/nyaru-svc2.0-advanced/models.py deleted file mode 100644 index 6efb5c541e1b2726ea4feb0973cd59f37ec1e0fd..0000000000000000000000000000000000000000 --- a/spaces/innnky/nyaru-svc2.0-advanced/models.py +++ /dev/null @@ -1,556 +0,0 @@ -import math -import math - -import torch -from torch import nn -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn import functional as F -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import attentions -import commons -import modules -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = 
nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class PitchPredictor(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab # 音素的个数,中文和英文不同 - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.pitch_net = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, 1, 1) - - def forward(self, x, x_mask): - pitch_embedding = self.pitch_net(x * x_mask, x_mask) - pitch_embedding = pitch_embedding * x_mask - pred_pitch = self.proj(pitch_embedding) - return pred_pitch, pitch_embedding - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - # self.emb = nn.Embedding(n_vocab, hidden_channels) - # nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - self.emb_pitch = nn.Embedding(256, hidden_channels) - nn.init.normal_(self.emb_pitch.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, pitch): - # x = x.transpose(1,2) - # x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - # print(x.shape) - x = x + self.emb_pitch(pitch) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - 
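A side note on the flow stack above: in the training direction each flow returns a transformed tensor plus a log-determinant, while the reverse (inference) direction inverts the flows in reversed order and returns only the tensor. The minimal sketch below mimics that control flow with a toy invertible flow; the class and variable names are invented for illustration and are not the project's modules.

```python
# Toy illustration of the forward/reverse pattern used by ResidualCouplingBlock.
# A "flow" here is just y = 2*x (invertible), with log|det| = n * log(2); real
# coupling layers return a per-sample logdet tensor rather than a float.
import math
import torch

class ToyFlow:
    def __call__(self, x, reverse=False):
        if not reverse:
            return x * 2.0, x[0].numel() * math.log(2.0)
        return x / 2.0

flows = [ToyFlow(), ToyFlow()]

x = torch.randn(1, 4)
z, logdet_tot = x, 0.0
for flow in flows:                    # forward (training): accumulate log-determinants
    z, logdet = flow(z, reverse=False)
    logdet_tot += logdet

x_rec = z
for flow in reversed(flows):          # reverse (inference): undo flows in reverse order
    x_rec = flow(x_rec, reverse=True)

print(torch.allclose(x, x_rec), logdet_tot)   # True, 8*log(2)
```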
-class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, 
(kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - 
kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - # self.pitch_net = PitchPredictor(n_vocab, inter_channels, hidden_channels, filter_channels, n_heads, n_layers, - # kernel_size, p_dropout) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def infer(self, x, x_lengths, pitch, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, pitch) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - - w_ceil = w_ceil * 0 + 2 - # for index in range(w_ceil.shape[2]): - # if index%4 == 0: - # w_ceil[0,0,index] = 1.0 - - for i in range(w_ceil.shape[2]): - sep = 1 / 0.14 - if i * sep >= w_ceil.shape[2] * 2: - break - w_ceil[0, 0, int(i * sep / 2)] = 1 - - # print(w_ceil) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Chimera Tool !!TOP!! Crack Keygen 24golkes.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Chimera Tool !!TOP!! Crack Keygen 24golkes.md deleted file mode 100644 index 6742207bd4a928c1fd032928745723ce44a75e5e..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Chimera Tool !!TOP!! Crack Keygen 24golkes.md +++ /dev/null @@ -1,6 +0,0 @@ -

    chimera tool crack keygen 24golkes


    Download File ⚹⚹⚹ https://urlin.us/2uEvL8



    -
    -. .com/stories/2651835-photoscore-ultimate-7-crack-keygeninstmankl-jonakar . 7, 2561 BC — .. com/stories/2651835-photoscore-ultimate-7-crack-keygeninstmankl-jonakar . 2561 BC - . .com/stories/2651835-photoscore-ultimate-7-crack-keygeninstmankl-jonakar . 2561 BC - . .com/stories/2651835-photoscore-ultimate-7-crack-keygeninstmankl-jonakar . 2561 BC - . .com/stories/2651835-photoscore-ultimate-7-crack-keygeninstmankl-jonakar . 2561 BC - . .com/stories/2651835-photoscore-ultimate-7-crack-keygeninstmankl-jonakar . 8a78ff9644
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Facebook Hacker V 2.9.0 Activation Code __LINK__.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Facebook Hacker V 2.9.0 Activation Code __LINK__.md deleted file mode 100644 index 26a9460041322ccf03ab8914e1497f7c01839bdb..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Facebook Hacker V 2.9.0 Activation Code __LINK__.md +++ /dev/null @@ -1,32 +0,0 @@ -

    facebook hacker v 2.9.0 activation code


DOWNLOAD https://urlin.us/2uEwTL



    -
    - . . . You are certain to find an email that seems like ‘. . . . . . . . .’ you need to simply delete the email in their junk mail. That’s the plain and simple process to receive back in their junk mail the fake  . . . . . . . . .” - -The Proxies are fake as well as Fake Website owners. Simply put, you may choose to perform an error message that is being sent. It is not necessary for your own email address to often be included in junk message. - -So, you’re going to own the email that’s being sent by the hacker. But, after you receive it, how’s a person going to prove that the e-mail is fake? It is certainly not possible. - -Hacker Proxies to hide identity – So, it is a simple fix. Select the fake website by you by browsing through their landing page. - -This gives an appearance of a legitimate website since it really is a legitimate website from the owner of this. You can find a small text that is saying that you are a member of this. - -The purpose is to make it look like a real website so that you can be able to believe that the scam. This way, the scammer gets a fake email address. From this point, you’ll need to download the Trojan. - -Proxies and Reverse Proxy – That can be done using the free proxy software. - -How Proxy works – Reverse proxy allows you to visit web pages directly. All the proxy website works. - -When your web browser visits a website, the proxy website redirects it towards the real website. What a website is redirected to your web browser. - -Reverse proxy works differently. Instead of having to visit a site, you visit a site. The site acts as a intermediary between you and the site you’re visiting. - -What the reverse proxy does, is it sends the data to the proxy site and forwards it back to the site that you were trying to visit. - -How to Use Proxy with Torrents – The Proxy stands as a safeguard. - -If your proxy doesn’t work with torrents, there’s an easy fix for it. You simply have to open up the torrent client. - -Don’t forget 4fefd39f24
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Geosoft Oasis Montaj 8 Crack !!TOP!!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Geosoft Oasis Montaj 8 Crack !!TOP!!.md deleted file mode 100644 index 94b970c09c111f6595c6afbea761a97ee5c12cd7..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Geosoft Oasis Montaj 8 Crack !!TOP!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    geosoft oasis montaj 8 crack


    Download ✦✦✦ https://urlin.us/2uExo7



    - -Oasis montaj is ideally suited for today's multidisciplinary and collaborative exploration. Access all your data and a powerful set of mapping and ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Keygen Para Activar AutoCAD 2019 64 Bits.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Keygen Para Activar AutoCAD 2019 64 Bits.md deleted file mode 100644 index f4d49aa44ccbe789158fee1e7ba4eaff097fca36..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Keygen Para Activar AutoCAD 2019 64 Bits.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Keygen Para Activar AutoCAD 2019 64 Bits


Download File https://urlin.us/2uEy43



    -
    -3ds max 2009 64 bit keygen download autodesk 2012 xforce free.. Download ... Codigo Para Activar El Juego Scania Truck Driving Simulator. 1fdad05405
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/MobileSheets Pro Music Reader V2.1.2 [Patched].md b/spaces/inplisQlawa/anything-midjourney-v4-1/MobileSheets Pro Music Reader V2.1.2 [Patched].md deleted file mode 100644 index 43def2e5690dc79bdaee1951e9a312d9cf434646..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/MobileSheets Pro Music Reader V2.1.2 [Patched].md +++ /dev/null @@ -1,83 +0,0 @@ - -

    MobileSheets Pro Music Reader v2.1.2 [Patched]: A Review

    -

    If you are a musician who uses Android tablets, you might be interested in MobileSheets Pro Music Reader v2.1.2 [Patched], the latest version of the popular music score player app. MobileSheets Pro Music Reader v2.1.2 [Patched] is designed to make your life easier by allowing you to access any music score in your library in seconds, without carrying around books or binders. In this article, we will review some of the features and benefits of MobileSheets Pro Music Reader v2.1.2 [Patched] and how it can help you improve your performance and practice.

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched]


    Download Filehttps://urlin.us/2uEwYI



    -

    What is MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched] is an app that lets you view and manage your music scores on your Android tablet. You can import your scores from various sources, such as images, PDFs, text files and professional chord files. You can also create your own scores using the built-in editor or by scanning them with your device's camera. MobileSheets Pro Music Reader v2.1.2 [Patched] supports multiple file formats and can display them in different modes, such as single page, two pages or half page.

    -

    What are the features of MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched] has many tools and features for musicians of all levels and genres, such as:

    -
      -
    • A custom audio player that lets you listen to audio tracks with your scores. You can adjust the volume, speed and pitch of the audio, as well as create a+b loops for practicing difficult sections.
    • -
    • A metronome that helps you keep time and rhythm with various display modes and sound effects.
    • -
    • Bookmarks that let you quickly and easily access specific parts of the scores.
    • -
    • Link points that let you jump between pages or sections of the scores with a single tap.
    • -
    • The ability to transpose the chords of a text or a professional chord file to any key.
    • -
    • Manual and automatic cropping that removes unnecessary margins and optimizes the display of the scores.
    • -
    • Setlists and collections that let you group songs efficiently and create playlists for different occasions.
    • -
    • Annotations that let you add notes, symbols, drawings and stamps to your scores.
    • -
    • A library manager that lets you organize your scores by various criteria, such as title, composer, genre, difficulty and more.
    • -
    • A backup and restore function that lets you save your data to your device or cloud storage.
    • -
    -

    What are the benefits of MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched] offers many benefits for musicians who want to enhance their performance and practice, such as:

    -

    -
      -
    • It saves you time and space by eliminating the need to carry around books or binders of music scores.
    • -
    • It gives you instant access to any score in your library with a simple search or browse function.
    • -
    • It lets you customize your viewing experience by adjusting the brightness, contrast, zoom level and orientation of the scores.
    • -
    • It helps you improve your skills by providing tools for practicing along with audio tracks, metronome and annotations.
    • -
    • It lets you perform with confidence by providing tools for navigating between pages, sections and bookmarks.
    • -
    • It lets you share your scores with other musicians by exporting them as PDFs, images or text files.
    • -
    -

    How to download MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    If you want to try MobileSheets Pro Music Reader v2.1.2 [Patched], you can download it from APK Home for free. APK Home is a trusted source of APK files for Android devices that offers safe and fast downloads of various apps and games. To download MobileSheets Pro Music Reader v2.1.2 [Patched] from APK Home, follow these steps:

    -
      -
    1. Go to https://apkhome.io/mobilesheetspro-music-reader-2-1-2-apk/ on your device's browser.
    2. -
    3. Tap on the download button at the bottom of the page.
    4. -
    5. Wait for the download to complete and then open the APK file.
    6. -
    7. Follow the installation instructions on your screen.
    8. -
    9. Enjoy using MobileSheets Pro Music Reader v2.1.2 [Patched] on your Android tablet.
    10. -

    -

    How to use MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    Using MobileSheets Pro Music Reader v2.1.2 [Patched] is easy and intuitive. You can start by importing your music scores from your device's storage, cloud storage, web browser or other apps. You can also create your own scores using the built-in editor or by scanning them with your device's camera. Once you have imported your scores, you can view them in different modes, such as single page, two pages or half page. You can also adjust the brightness, contrast, zoom level and orientation of the scores to suit your preference. You can use the custom audio player to listen to audio tracks with your scores. You can adjust the volume, speed and pitch of the audio, as well as create a+b loops for practicing difficult sections. You can use the metronome to keep time and rhythm with various display modes and sound effects. You can use bookmarks to quickly and easily access specific parts of the scores. You can use link points to jump between pages or sections of the scores with a single tap. You can transpose the chords of a text or a professional chord file to any key. You can crop the scores manually or automatically to remove unnecessary margins and optimize the display of the scores. You can create setlists and collections to group songs efficiently and create playlists for different occasions. You can annotate your scores with notes, symbols, drawings and stamps. You can organize your scores by various criteria, such as title, composer, genre, difficulty and more. You can backup and restore your data to your device or cloud storage.

    -

    What are the pros and cons of MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched] is a powerful and versatile app that offers many advantages for musicians who use Android tablets, such as:

    -
      -
    • It is the first music score player for Android tablets that supports multiple file formats and display modes.
    • -
    • It has many tools and features for musicians of all levels and genres.
    • -
    • It saves you time and space by eliminating the need to carry around books or binders of music scores.
    • -
    • It gives you instant access to any score in your library with a simple search or browse function.
    • -
    • It lets you customize your viewing experience by adjusting the brightness, contrast, zoom level and orientation of the scores.
    • -
    • It helps you improve your skills by providing tools for practicing along with audio tracks, metronome and annotations.
    • -
    • It lets you perform with confidence by providing tools for navigating between pages, sections and bookmarks.
    • -
    • It lets you share your scores with other musicians by exporting them as PDFs, images or text files.
    • -
    -

    However, MobileSheets Pro Music Reader v2.1.2 [Patched] also has some drawbacks that you should be aware of, such as:

    -
      -
    • It requires Android 4.0 or higher to run.
    • -
    • It may not be compatible with some devices or screen sizes.
    • -
    • It may consume a lot of battery power when using some features such as audio playback or scanning.
    • -
    • It may not support some file formats or features that are available in other apps or platforms.
    • -
    -

    Conclusion

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched] is a great app for musicians who use Android tablets to view and manage their music scores. It has many tools and features that make it easy and convenient to access any score in your library in seconds, without carrying around books or binders. It also helps you improve your performance and practice by providing tools for practicing along with audio tracks, metronome and annotations. It also lets you share your scores with other musicians by exporting them as PDFs, images or text files. If you are looking for a music score player app that is powerful and versatile, you should give MobileSheets Pro Music Reader v2.1.2 [Patched] a try.

    -

    How to get support for MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    If you have any questions, issues or feedback about MobileSheets Pro Music Reader v2.1.2 [Patched], you can get support from the developer and the community in various ways, such as:

    -
      -
    • Contacting the developer directly by email at mike@zubersoft.com or by phone at +1 425-999-4599.
    • -
    • Visiting the official website at https://www.zubersoft.com/mobilesheets/ for more information, tutorials and FAQs.
    • -
    • Joining the MobileSheets Forums at https://zubersoft.com/mobilesheets/forum/ to interact with other users, share tips and tricks, request features and report bugs.
    • -
    • Following the MobileSheets Facebook page at https://www.facebook.com/MobileSheets/ to get the latest news and updates.
    • -
    • Leaving a review or rating on the Google Play Store or the Microsoft Store to share your experience and feedback.
    • -
    -

    What are the alternatives to MobileSheets Pro Music Reader v2.1.2 [Patched]?

    -

    If you are looking for other apps that can help you view and manage your music scores on your Android tablet, you might want to check out some of these alternatives:

    • MuseScore: MuseScore is a versatile music notation program that lets you create, edit and manage scores and gives your music a professional finishing touch. You can import and export scores in various formats, such as PDF, MIDI, MusicXML and more. You can also play back your scores with realistic instrument sounds and share them online with other musicians. MuseScore is free and open source, and you can download it from https://musescore.org/.
    • forScore: forScore is a powerful and elegant app that lets you view, organize and annotate your music scores on your Android tablet. You can import your scores from various sources, such as PDFs, images or cloud storage. You can also create your own scores using the built-in editor or by scanning them with your device's camera. forScore supports multiple file formats and display modes, and has many tools for musicians, such as a metronome, tuner, pitch pipe, audio player and more. forScore costs $9.99 and you can download it from https://forscore.co/android/.
    • Orpheus Sheet Music Pro: Orpheus Sheet Music Pro is a simple and intuitive app that lets you view your music scores on your Android tablet. You can import your scores from various sources, such as PDFs, images or cloud storage. You can also create your own scores using the built-in editor or by scanning them with your device's camera. Orpheus Sheet Music Pro supports multiple file formats and display modes, and has some tools for musicians, such as a metronome, audio player and bookmarks. Orpheus Sheet Music Pro costs $4.99 and you can download it from https://orpheus-app.com/.

    Conclusion

    -

    MobileSheets Pro Music Reader v2.1.2 [Patched] is a great app for musicians who use Android tablets to view and manage their music scores. It has many tools and features that make it easy and convenient to access any score in your library in seconds, without carrying around books or binders. It helps you improve your performance and practice with audio tracks, a metronome and annotations, and it lets you share your scores with other musicians by exporting them as PDFs, images or text files. If you are looking for a powerful and versatile music score player, give MobileSheets Pro Music Reader v2.1.2 [Patched] a try. You can download it from APK Home for free, or check out some of the other apps that can help you view and manage your music scores on your Android tablet, such as MuseScore, forScore and Orpheus Sheet Music Pro.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Activation Code For Ilok For Avid Pro Tools Hd 10.3.2l !FREE!.md b/spaces/inreVtussa/clothingai/Examples/Activation Code For Ilok For Avid Pro Tools Hd 10.3.2l !FREE!.md deleted file mode 100644 index 8c083309301a4b7b4724be8b6c56900159e3090a..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Activation Code For Ilok For Avid Pro Tools Hd 10.3.2l !FREE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Activation Code For Ilok For Avid Pro Tools Hd 10.3.2l


    DOWNLOAD →→→ https://tiurll.com/2uCmgZ



    - -apetools.ilok.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools_1000.apetools_ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools_files_1000.apetools_ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools_label_ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools_picture_ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools_start_menu_ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools.ilok_activation_code_for_windows_10_v_1511_aio_x86_x64_18in1_may2016_apetools_type_ilok_activation_code_for_windows_10_v_1511_aio_x86_x 4fefd39f24
    -
    -
    -

    diff --git a/spaces/inreVtussa/clothingai/Examples/Auto Hide IP V5.6.4.2 Patch [CracksNow] Crack.md b/spaces/inreVtussa/clothingai/Examples/Auto Hide IP V5.6.4.2 Patch [CracksNow] Crack.md deleted file mode 100644 index 23641f99f08ed99fb63e530a83923eb73b8b05c7..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Auto Hide IP V5.6.4.2 Patch [CracksNow] Crack.md +++ /dev/null @@ -1,11 +0,0 @@ -

    Auto Hide IP v5.6.4.2 Patch [CracksNow] crack


    Download Zip ★★★ https://tiurll.com/2uCky3



    - -After you make sure it is positioned correctly, fix the shield module. You have the following options to specify the IP address of the inverter: . Update to Globus Toolkit 5.0.4 - Fix doxygen markup - Update to Globus Toolkit. It provides all the functionality you might need for your application. -Globus Toolkit provides features that can be used in applications and websites. -This chapter describes how to develop and build an application. -I've just started learning C++ and I'm using the Globus Toolkit as my project build tutorial/tutorial. -Question: What are the options for creating a project in the Globus Toolkit? -I used the project tutorial on GitHub (https://github.com/sjlal/GlobusToolkit 8a78ff9644
    -
    -
    -

    diff --git a/spaces/inreVtussa/clothingai/Examples/Code Produit Origin Sims 4 Crack 27.md b/spaces/inreVtussa/clothingai/Examples/Code Produit Origin Sims 4 Crack 27.md deleted file mode 100644 index b5d3a583b6b3c3d165ab95d5fc35ddb1bb98c7e2..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Code Produit Origin Sims 4 Crack 27.md +++ /dev/null @@ -1,88 +0,0 @@ - -

    Code Produit Origin Sims 4 Crack 27: How to Play The Sims 4 for Free

    - -

    The Sims 4 is one of the most popular life simulation games in the world. It allows you to create and control your own virtual characters, called Sims, and explore various aspects of their lives, such as careers, relationships, hobbies, and more. The game also offers a variety of expansion packs and game packs that add new features and content to the base game.

    - -

    However, The Sims 4 is not a cheap game. The base game costs $39.99 on Origin, the official digital distribution platform for EA games. The expansion packs cost $39.99 each, the game packs cost $19.99 each, and the stuff packs cost $9.99 each. If you want to buy all the available packs for The Sims 4, you would have to spend over $500.

    -

    code produit origin sims 4 crack 27


    Download --->>> https://tiurll.com/2uCjmX



    - -

    That's why many people are looking for ways to play The Sims 4 for free. One of the most common methods is to use a crack, which is a modified version of the game that bypasses the activation process and allows you to play without paying. One of the most popular cracks for The Sims 4 is the Codex crack, which is updated regularly to include the latest patches and packs.

    - -

    What is Code Produit Origin Sims 4 Crack 27?

    - -

    Code Produit Origin Sims 4 Crack 27 is the name of a search query that people use to find and download the Codex crack for The Sims 4. The number 27 refers to the version of the crack, which corresponds to the version of the game. As of November 2022, the latest version of The Sims 4 is v1.81.15.1030, which includes the Cottage Living expansion pack and the latest updates.

    - -

    The Codex crack for The Sims 4 is a file that replaces the original game executable and allows you to play without Origin, the online service that verifies your game license and connects you to other players. The Codex crack also includes all the previous expansion packs, game packs, and stuff packs that have been released for The Sims 4.

    - -

    To use the Codex crack for The Sims 4, you need to have a copy of the base game installed on your computer. You can either buy it from Origin or download it from a torrent site. Then, you need to download the Codex crack from a reliable source, such as Reddit or Pirate Bay. You need to extract the files from the crack archive and copy them to your game folder, replacing the original files. After that, you can launch the game from the cracked executable and enjoy playing The Sims 4 for free.

    - -

    What are the advantages and disadvantages of using Code Produit Origin Sims 4 Crack 27?

    - -

    Using Code Produit Origin Sims 4 Crack 27 has some advantages and disadvantages that you should be aware of before deciding to use it.

    - -

    The main advantage of using Code Produit Origin Sims 4 Crack 27 is that you can play The Sims 4 for free, without spending any money on buying the game or its packs. You can also access all the content that has been released for The Sims 4 so far, including the latest expansion pack Cottage Living. You can also play offline, without needing an internet connection or an Origin account.

    -

    - -

    The main disadvantage of using Code Produit Origin Sims 4 Crack 27 is that you are violating EA's terms of service and infringing its copyright as the publisher of The Sims 4. This means that you are risking legal action from EA if they find out that you are using a cracked version of their game. You are also exposing your computer to potential viruses and malware that may be hidden in the crack files or on the sites where you download them. You are also missing out on some features and benefits that come with playing The Sims 4 on Origin, such as online multiplayer, cloud saves, achievements, gallery access, customer support, and more.

    - -

    Is Code Produit Origin Sims 4 Crack 27 safe to use?

    - -

    Code Produit Origin Sims 4 Crack 27 is not safe to use for several reasons.

    - -

    First of all, using Code Produit Origin Sims 4 Crack 27 is illegal and unethical. You are stealing from EA and depriving them of their rightful revenue from selling their game and its packs. You are also disrespecting the developers and creators who worked hard to make The Sims 4 a fun and enjoyable game for millions of players around the world.

    - -

    Secondly, using Code Produit Origin Sims 4 Crack 27 is risky and dangerous. You are downloading files from unknown sources that may contain viruses or malware that can harm your computer or steal your personal information. You are also modifying your game files in a way that may cause errors or glitches in your gameplay or even damage your game installation beyond repair.

    - -

    Thirdly, using Code Produit Origin Sims 4 Crack 27 is unsatisfying and limiting. You are playing an outdated and incomplete version of The Sims 4 that may not work properly or have all the features and content that EA offers through Origin. You are also missing out on the online community and social aspects of The Sims 4 that make it more fun and engaging.

    - -

    What are some alternatives to using Code Produit Origin Sims 4 Crack 27?

    - -

    If you want to play The Sims 4 without using Code Produit Origin Sims 4 Crack 27, you have some alternatives that are legal, safe, and satisfying.

    - -

    One alternative is to buy The Sims 4 from Origin or from a trusted retailer. This way, you can support EA and the developers of The Sims 4, and enjoy the full and updated version of the game with all its features and content. You can also access Origin's online services, such as multiplayer, cloud saves, achievements, gallery, customer support, and more. You can also take advantage of Origin's sales and discounts, which often offer The Sims 4 and its packs at lower prices.

    - -

    Another alternative is to subscribe to Origin Access Premier, which is a subscription service that gives you unlimited access to The Sims 4 and over 200 other EA games for a monthly or annual fee. You can play The Sims 4 with all its expansion packs, game packs, and stuff packs included, as well as new releases as they become available. You can also enjoy other benefits, such as early access to new games, exclusive content, and a 10% discount on Origin purchases.

    - -

    A third alternative is to use mods and custom content for The Sims 4, which are user-created additions or modifications that enhance or change the game in various ways. You can find thousands of mods and custom content for The Sims 4 on sites like Mod The Sims, The Sims Resource, or Tumblr. You can download and install them for free, and use them to customize your game according to your preferences and tastes. However, you should be careful when using mods and custom content, as they may not be compatible with your game version or with each other, and may cause errors or glitches in your gameplay. You should also backup your game files before installing any mods or custom content, and remove them if they cause any problems.
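
    If you follow that backup advice, a small script can take care of it for you. The sketch below is a minimal, hypothetical Python example (not part of the original article or any official EA tool) that copies the Sims 4 user folder, which holds your saves, Mods and Tray files, into a timestamped backup directory before you install new custom content. The folder locations are assumptions for a default Windows setup and would need adjusting for your own machine.

```python
import shutil
from datetime import datetime
from pathlib import Path

# Assumed default location of the Sims 4 user folder on Windows.
# Adjust this path for your own installation or operating system.
SIMS4_DIR = Path.home() / "Documents" / "Electronic Arts" / "The Sims 4"

# Hypothetical destination folder for the timestamped backups.
BACKUP_ROOT = Path.home() / "Sims4Backups"


def backup_sims4_folder() -> Path:
    """Copy the whole Sims 4 user folder into a timestamped backup directory."""
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    destination = BACKUP_ROOT / f"sims4-backup-{stamp}"
    # copytree preserves the folder structure (saves, Mods, Tray, Options.ini, ...)
    shutil.copytree(SIMS4_DIR, destination)
    return destination


if __name__ == "__main__":
    print(f"Backed up The Sims 4 folder to: {backup_sims4_folder()}")
```

    Running it once before each new batch of mods gives you a folder you can simply copy back if anything misbehaves.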

    - -

    Conclusion

    - -

    The Sims 4 is a wonderful game that lets you create and control your own virtual characters and explore their lives in various ways. However, playing The Sims 4 for free using Code Produit Origin Sims 4 Crack 27 is not a good idea, as it is illegal, unsafe, and unsatisfying. Instead, you should buy The Sims 4 from Origin or a trusted retailer, subscribe to Origin Access Premier, or use mods and custom content to enhance your game experience. This way, you can enjoy The Sims 4 legally, safely, and satisfyingly.


    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/iqovocn/ChuanhuChatGPT/modules/models/minimax.py b/spaces/iqovocn/ChuanhuChatGPT/modules/models/minimax.py deleted file mode 100644 index 2e1b50280fd2fbc43a69caaf660a0d64beaa405b..0000000000000000000000000000000000000000 --- a/spaces/iqovocn/ChuanhuChatGPT/modules/models/minimax.py +++ /dev/null @@ -1,161 +0,0 @@ -import json -import os - -import colorama -import requests -import logging - -from modules.models.base_model import BaseLLMModel -from modules.presets import STANDARD_ERROR_MSG, GENERAL_ERROR_MSG, TIMEOUT_STREAMING, TIMEOUT_ALL, i18n - -group_id = os.environ.get("MINIMAX_GROUP_ID", "") - - -class MiniMax_Client(BaseLLMModel): - """ - MiniMax Client - 接口文档见 https://api.minimax.chat/document/guides/chat - """ - - def __init__(self, model_name, api_key, user_name="", system_prompt=None): - super().__init__(model_name=model_name, user=user_name) - self.url = f'https://api.minimax.chat/v1/text/chatcompletion?GroupId={group_id}' - self.history = [] - self.api_key = api_key - self.system_prompt = system_prompt - self.headers = { - "Authorization": f"Bearer {api_key}", - "Content-Type": "application/json" - } - - def get_answer_at_once(self): - # minimax temperature is (0,1] and base model temperature is [0,2], and yuan 0.9 == base 1 so need to convert - temperature = self.temperature * 0.9 if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - - request_body = { - "model": self.model_name.replace('minimax-', ''), - "temperature": temperature, - "skip_info_mask": True, - 'messages': [{"sender_type": "USER", "text": self.history[-1]['content']}] - } - if self.n_choices: - request_body['beam_width'] = self.n_choices - if self.system_prompt: - request_body['prompt'] = self.system_prompt - if self.max_generation_token: - request_body['tokens_to_generate'] = self.max_generation_token - if self.top_p: - request_body['top_p'] = self.top_p - - response = requests.post(self.url, headers=self.headers, json=request_body) - - res = response.json() - answer = res['reply'] - total_token_count = res["usage"]["total_tokens"] - return answer, total_token_count - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG - - def _get_response(self, stream=False): - minimax_api_key = self.api_key - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {minimax_api_key}", - } - - temperature = self.temperature * 0.9 if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - - messages = [] - for msg in self.history: - if msg['role'] == 'user': - messages.append({"sender_type": "USER", "text": msg['content']}) - else: - messages.append({"sender_type": "BOT", "text": msg['content']}) - - request_body = { - "model": self.model_name.replace('minimax-', ''), - "temperature": temperature, - "skip_info_mask": True, - 'messages': messages - } - if self.n_choices: - request_body['beam_width'] = self.n_choices - if self.system_prompt: - lines = self.system_prompt.splitlines() - if lines[0].find(":") != -1 and len(lines[0]) < 20: - request_body["role_meta"] = { - "user_name": lines[0].split(":")[0], - "bot_name": lines[0].split(":")[1] - } - lines.pop() - request_body["prompt"] = "\n".join(lines) - if 
self.max_generation_token: - request_body['tokens_to_generate'] = self.max_generation_token - else: - request_body['tokens_to_generate'] = 512 - if self.top_p: - request_body['top_p'] = self.top_p - - if stream: - timeout = TIMEOUT_STREAMING - request_body['stream'] = True - request_body['use_standard_sse'] = True - else: - timeout = TIMEOUT_ALL - try: - response = requests.post( - self.url, - headers=headers, - json=request_body, - stream=stream, - timeout=timeout, - ) - except: - return None - - return response - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - print(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if "finish_reason" in chunk["choices"][0] and chunk["choices"][0]["finish_reason"] == "stop": - self.all_token_counts.append(chunk["usage"]["total_tokens"] - sum(self.all_token_counts)) - break - try: - yield chunk["choices"][0]["delta"] - except Exception as e: - logging.error(f"Error: {e}") - continue - if error_msg: - try: - error_msg = json.loads(error_msg) - if 'base_resp' in error_msg: - status_code = error_msg['base_resp']['status_code'] - status_msg = error_msg['base_resp']['status_msg'] - raise Exception(f"{status_code} - {status_msg}") - except json.JSONDecodeError: - pass - raise Exception(error_msg) diff --git a/spaces/israelgonzalezb/stable-diffusion/README.md b/spaces/israelgonzalezb/stable-diffusion/README.md deleted file mode 100644 index dad46a174fff4154a687e56c4ddc5826afe099bc..0000000000000000000000000000000000000000 --- a/spaces/israelgonzalezb/stable-diffusion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Diffusion 2 -emoji: 🔮 -colorFrom: gray -colorTo: pink -sdk: static -pinned: true -license: mit - ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ivy-1911/vits-uma-genshin-honkai/attentions.py b/spaces/ivy-1911/vits-uma-genshin-honkai/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/ivy-1911/vits-uma-genshin-honkai/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, 
x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - 
nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/jackli888/stable-diffusion-webui/modules/face_restoration.py b/spaces/jackli888/stable-diffusion-webui/modules/face_restoration.py deleted file mode 100644 index 2c86c6ccce338a1411f4367a0bc6e4046ad67cae..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/face_restoration.py +++ /dev/null @@ -1,19 +0,0 @@ -from modules import shared - - -class FaceRestoration: - def name(self): - return "None" - - def restore(self, np_image): - return np_image - - -def restore_faces(np_image): - face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] - if len(face_restorers) == 0: - return np_image - - face_restorer = face_restorers[0] - - return face_restorer.restore(np_image) diff --git a/spaces/jackyccl/segment-anything/groundingdino/config/GroundingDINO_SwinT_OGC.py b/spaces/jackyccl/segment-anything/groundingdino/config/GroundingDINO_SwinT_OGC.py deleted file mode 100644 index 9158d5f6260ec74bded95377d382387430d7cd70..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/config/GroundingDINO_SwinT_OGC.py +++ /dev/null @@ -1,43 +0,0 @@ -batch_size = 1 -modelname = "groundingdino" -backbone = "swin_T_224_1k" -position_embedding = "sine" -pe_temperatureH = 20 -pe_temperatureW = 20 -return_interm_indices = [1, 2, 3] -backbone_freeze_keywords = None -enc_layers = 6 -dec_layers = 6 -pre_norm = False -dim_feedforward = 2048 -hidden_dim = 256 -dropout = 0.0 -nheads = 8 -num_queries = 900 -query_dim = 4 -num_patterns = 0 -num_feature_levels = 4 -enc_n_points = 4 -dec_n_points = 4 -two_stage_type = "standard" -two_stage_bbox_embed_share = False -two_stage_class_embed_share = False -transformer_activation = "relu" -dec_pred_bbox_embed_share = True -dn_box_noise_scale = 1.0 -dn_label_noise_ratio = 0.5 
-dn_label_coef = 1.0 -dn_bbox_coef = 1.0 -embed_init_tgt = True -dn_labelbook_size = 2000 -max_text_len = 256 -text_encoder_type = "bert-base-uncased" -use_text_enhancer = True -use_fusion_layer = True -use_checkpoint = True -use_transformer_ckpt = True -use_text_cross_attention = True -text_dropout = 0.0 -fusion_dropout = 0.0 -fusion_droppath = 0.1 -sub_sentence_present = True diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/ai-clip-factory/index.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/ai-clip-factory/index.tsx deleted file mode 100644 index ce8e9d3c910be49ec99b46d9c7c22ee5e7783a86..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/ai-clip-factory/index.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import { Button } from "@/components/ui/button" - -export function AIClipFactory() { - return ( - - ) -} \ No newline at end of file diff --git a/spaces/jhwen/bingo/src/components/chat-suggestions.tsx b/spaces/jhwen/bingo/src/components/chat-suggestions.tsx deleted file mode 100644 index 48aec7c84e4407c482acdfcc7857fb0f660d12d3..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/src/components/chat-suggestions.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import React, { useMemo } from 'react' -import Image from 'next/image' -import HelpIcon from '@/assets/images/help.svg' -import { SuggestedResponse } from '@/lib/bots/bing/types' -import { useBing } from '@/lib/hooks/use-bing' -import { atom, useAtom } from 'jotai' - -type Suggestions = SuggestedResponse[] -const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text })) -const suggestionsAtom = atom([]) - -type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick, 'setInput'> & { suggestions?: Suggestions } - -export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) { - const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom) - const toggleSuggestions = (() => { - if (currentSuggestions === helpSuggestions) { - setSuggestions(suggestions) - } else { - setSuggestions(helpSuggestions) - } - }) - - useMemo(() => { - setSuggestions(suggestions) - window.scrollBy(0, 2000) - }, [suggestions.length, setSuggestions]) - - return currentSuggestions?.length ? ( -
    -
    - - { - currentSuggestions.map(suggestion => ( - - )) - } -
    -
    - ) : null -} diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_ARC2.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_ARC2.py deleted file mode 100644 index fd9448c1d55c48891da3fbf4e86c0a730f2ecb53..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_ARC2.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -# -# SelfTest/Cipher/ARC2.py: Self-test for the Alleged-RC2 cipher -# -# Written in 2008 by Dwayne C. Litzenberger -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -"""Self-test suite for Crypto.Cipher.ARC2""" - -import unittest - -from Crypto.Util.py3compat import b, bchr - -from Crypto.Cipher import ARC2 - -# This is a list of (plaintext, ciphertext, key[, description[, extra_params]]) tuples. 
-test_data = [ - # Test vectors from RFC 2268 - - # 63-bit effective key length - ('0000000000000000', 'ebb773f993278eff', '0000000000000000', - 'RFC2268-1', dict(effective_keylen=63)), - - # 64-bit effective key length - ('ffffffffffffffff', '278b27e42e2f0d49', 'ffffffffffffffff', - 'RFC2268-2', dict(effective_keylen=64)), - ('1000000000000001', '30649edf9be7d2c2', '3000000000000000', - 'RFC2268-3', dict(effective_keylen=64)), - #('0000000000000000', '61a8a244adacccf0', '88', - # 'RFC2268-4', dict(effective_keylen=64)), - ('0000000000000000', '6ccf4308974c267f', '88bca90e90875a', - 'RFC2268-5', dict(effective_keylen=64)), - ('0000000000000000', '1a807d272bbe5db1', '88bca90e90875a7f0f79c384627bafb2', - 'RFC2268-6', dict(effective_keylen=64)), - - # 128-bit effective key length - ('0000000000000000', '2269552ab0f85ca6', '88bca90e90875a7f0f79c384627bafb2', - "RFC2268-7", dict(effective_keylen=128)), - ('0000000000000000', '5b78d3a43dfff1f1', - '88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e', - "RFC2268-8", dict(effective_keylen=129)), - - # Test vectors from PyCrypto 2.0.1's testdata.py - # 1024-bit effective key length - ('0000000000000000', '624fb3e887419e48', '5068696c6970476c617373', - 'PCTv201-0'), - ('ffffffffffffffff', '79cadef44c4a5a85', '5068696c6970476c617373', - 'PCTv201-1'), - ('0001020304050607', '90411525b34e4c2c', '5068696c6970476c617373', - 'PCTv201-2'), - ('0011223344556677', '078656aaba61cbfb', '5068696c6970476c617373', - 'PCTv201-3'), - ('0000000000000000', 'd7bcc5dbb4d6e56a', 'ffffffffffffffff', - 'PCTv201-4'), - ('ffffffffffffffff', '7259018ec557b357', 'ffffffffffffffff', - 'PCTv201-5'), - ('0001020304050607', '93d20a497f2ccb62', 'ffffffffffffffff', - 'PCTv201-6'), - ('0011223344556677', 'cb15a7f819c0014d', 'ffffffffffffffff', - 'PCTv201-7'), - ('0000000000000000', '63ac98cdf3843a7a', 'ffffffffffffffff5065746572477265656e6177617953e5ffe553', - 'PCTv201-8'), - ('ffffffffffffffff', '3fb49e2fa12371dd', 'ffffffffffffffff5065746572477265656e6177617953e5ffe553', - 'PCTv201-9'), - ('0001020304050607', '46414781ab387d5f', 'ffffffffffffffff5065746572477265656e6177617953e5ffe553', - 'PCTv201-10'), - ('0011223344556677', 'be09dc81feaca271', 'ffffffffffffffff5065746572477265656e6177617953e5ffe553', - 'PCTv201-11'), - ('0000000000000000', 'e64221e608be30ab', '53e5ffe553', - 'PCTv201-12'), - ('ffffffffffffffff', '862bc60fdcd4d9a9', '53e5ffe553', - 'PCTv201-13'), - ('0001020304050607', '6a34da50fa5e47de', '53e5ffe553', - 'PCTv201-14'), - ('0011223344556677', '584644c34503122c', '53e5ffe553', - 'PCTv201-15'), -] - -class BufferOverflowTest(unittest.TestCase): - # Test a buffer overflow found in older versions of PyCrypto - - def runTest(self): - """ARC2 with keylength > 128""" - key = b("x") * 16384 - self.assertRaises(ValueError, ARC2.new, key, ARC2.MODE_ECB) - -class KeyLength(unittest.TestCase): - - def runTest(self): - ARC2.new(b'\x00' * 16, ARC2.MODE_ECB, effective_keylen=40) - self.assertRaises(ValueError, ARC2.new, bchr(0) * 4, ARC2.MODE_ECB) - self.assertRaises(ValueError, ARC2.new, bchr(0) * 129, ARC2.MODE_ECB) - - self.assertRaises(ValueError, ARC2.new, bchr(0) * 16, ARC2.MODE_ECB, - effective_keylen=39) - self.assertRaises(ValueError, ARC2.new, bchr(0) * 16, ARC2.MODE_ECB, - effective_keylen=1025) - - -class TestOutput(unittest.TestCase): - - def runTest(self): - # Encrypt/Decrypt data and test output parameter - - cipher = ARC2.new(b'4'*16, ARC2.MODE_ECB) - - pt = b'5' * 16 - ct = cipher.encrypt(pt) - - output = bytearray(16) - res = cipher.encrypt(pt, 
output=output) - self.assertEqual(ct, output) - self.assertEqual(res, None) - - res = cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - self.assertEqual(res, None) - - output = memoryview(bytearray(16)) - cipher.encrypt(pt, output=output) - self.assertEqual(ct, output) - - cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - - self.assertRaises(TypeError, cipher.encrypt, pt, output=b'0'*16) - self.assertRaises(TypeError, cipher.decrypt, ct, output=b'0'*16) - - shorter_output = bytearray(7) - self.assertRaises(ValueError, cipher.encrypt, pt, output=shorter_output) - self.assertRaises(ValueError, cipher.decrypt, ct, output=shorter_output) - - -def get_tests(config={}): - from Crypto.Cipher import ARC2 - from .common import make_block_tests - - tests = make_block_tests(ARC2, "ARC2", test_data) - tests.append(BufferOverflowTest()) - tests.append(KeyLength()) - tests += [TestOutput()] - - return tests - -if __name__ == '__main__': - import unittest - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') - -# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageOps.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageOps.py deleted file mode 100644 index 17702778c134abcb51d7632367fbbf1a2f3048fa..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageOps.py +++ /dev/null @@ -1,628 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard image operations -# -# History: -# 2001-10-20 fl Created -# 2001-10-23 fl Added autocontrast operator -# 2001-12-18 fl Added Kevin's fit operator -# 2004-03-14 fl Fixed potential division by zero in equalize -# 2005-05-05 fl Fixed equalize for low number of values -# -# Copyright (c) 2001-2004 by Secret Labs AB -# Copyright (c) 2001-2004 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import functools -import operator -import re - -from . import ExifTags, Image, ImagePalette - -# -# helpers - - -def _border(border): - if isinstance(border, tuple): - if len(border) == 2: - left, top = right, bottom = border - elif len(border) == 4: - left, top, right, bottom = border - else: - left = top = right = bottom = border - return left, top, right, bottom - - -def _color(color, mode): - if isinstance(color, str): - from . import ImageColor - - color = ImageColor.getcolor(color, mode) - return color - - -def _lut(image, lut): - if image.mode == "P": - # FIXME: apply to lookup table, not image data - msg = "mode P support coming soon" - raise NotImplementedError(msg) - elif image.mode in ("L", "RGB"): - if image.mode == "RGB" and len(lut) == 256: - lut = lut + lut + lut - return image.point(lut) - else: - msg = "not supported for this image mode" - raise OSError(msg) - - -# -# actions - - -def autocontrast(image, cutoff=0, ignore=None, mask=None, preserve_tone=False): - """ - Maximize (normalize) image contrast. This function calculates a - histogram of the input image (or mask region), removes ``cutoff`` percent of the - lightest and darkest pixels from the histogram, and remaps the image - so that the darkest pixel becomes black (0), and the lightest - becomes white (255). - - :param image: The image to process. - :param cutoff: The percent to cut off from the histogram on the low and - high ends. Either a tuple of (low, high), or a single - number for both. 
- :param ignore: The background pixel value (use None for no background). - :param mask: Histogram used in contrast operation is computed using pixels - within the mask. If no mask is given the entire image is used - for histogram computation. - :param preserve_tone: Preserve image tone in Photoshop-like style autocontrast. - - .. versionadded:: 8.2.0 - - :return: An image. - """ - if preserve_tone: - histogram = image.convert("L").histogram(mask) - else: - histogram = image.histogram(mask) - - lut = [] - for layer in range(0, len(histogram), 256): - h = histogram[layer : layer + 256] - if ignore is not None: - # get rid of outliers - try: - h[ignore] = 0 - except TypeError: - # assume sequence - for ix in ignore: - h[ix] = 0 - if cutoff: - # cut off pixels from both ends of the histogram - if not isinstance(cutoff, tuple): - cutoff = (cutoff, cutoff) - # get number of pixels - n = 0 - for ix in range(256): - n = n + h[ix] - # remove cutoff% pixels from the low end - cut = n * cutoff[0] // 100 - for lo in range(256): - if cut > h[lo]: - cut = cut - h[lo] - h[lo] = 0 - else: - h[lo] -= cut - cut = 0 - if cut <= 0: - break - # remove cutoff% samples from the high end - cut = n * cutoff[1] // 100 - for hi in range(255, -1, -1): - if cut > h[hi]: - cut = cut - h[hi] - h[hi] = 0 - else: - h[hi] -= cut - cut = 0 - if cut <= 0: - break - # find lowest/highest samples after preprocessing - for lo in range(256): - if h[lo]: - break - for hi in range(255, -1, -1): - if h[hi]: - break - if hi <= lo: - # don't bother - lut.extend(list(range(256))) - else: - scale = 255.0 / (hi - lo) - offset = -lo * scale - for ix in range(256): - ix = int(ix * scale + offset) - if ix < 0: - ix = 0 - elif ix > 255: - ix = 255 - lut.append(ix) - return _lut(image, lut) - - -def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127): - """ - Colorize grayscale image. - This function calculates a color wedge which maps all black pixels in - the source image to the first color and all white pixels to the - second color. If ``mid`` is specified, it uses three-color mapping. - The ``black`` and ``white`` arguments should be RGB tuples or color names; - optionally you can use three-color mapping by also specifying ``mid``. - Mapping positions for any of the colors can be specified - (e.g. ``blackpoint``), where these parameters are the integer - value corresponding to where the corresponding color should be mapped. - These parameters must have logical order, such that - ``blackpoint <= midpoint <= whitepoint`` (if ``mid`` is specified). - - :param image: The image to colorize. - :param black: The color to use for black input pixels. - :param white: The color to use for white input pixels. - :param mid: The color to use for midtone input pixels. - :param blackpoint: an int value [0, 255] for the black mapping. - :param whitepoint: an int value [0, 255] for the white mapping. - :param midpoint: an int value [0, 255] for the midtone mapping. - :return: An image. 
- """ - - # Initial asserts - assert image.mode == "L" - if mid is None: - assert 0 <= blackpoint <= whitepoint <= 255 - else: - assert 0 <= blackpoint <= midpoint <= whitepoint <= 255 - - # Define colors from arguments - black = _color(black, "RGB") - white = _color(white, "RGB") - if mid is not None: - mid = _color(mid, "RGB") - - # Empty lists for the mapping - red = [] - green = [] - blue = [] - - # Create the low-end values - for i in range(0, blackpoint): - red.append(black[0]) - green.append(black[1]) - blue.append(black[2]) - - # Create the mapping (2-color) - if mid is None: - range_map = range(0, whitepoint - blackpoint) - - for i in range_map: - red.append(black[0] + i * (white[0] - black[0]) // len(range_map)) - green.append(black[1] + i * (white[1] - black[1]) // len(range_map)) - blue.append(black[2] + i * (white[2] - black[2]) // len(range_map)) - - # Create the mapping (3-color) - else: - range_map1 = range(0, midpoint - blackpoint) - range_map2 = range(0, whitepoint - midpoint) - - for i in range_map1: - red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1)) - green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1)) - blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1)) - for i in range_map2: - red.append(mid[0] + i * (white[0] - mid[0]) // len(range_map2)) - green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2)) - blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2)) - - # Create the high-end values - for i in range(0, 256 - whitepoint): - red.append(white[0]) - green.append(white[1]) - blue.append(white[2]) - - # Return converted image - image = image.convert("RGB") - return _lut(image, red + green + blue) - - -def contain(image, size, method=Image.Resampling.BICUBIC): - """ - Returns a resized version of the image, set to the maximum width and height - within the requested size, while maintaining the original aspect ratio. - - :param image: The image to resize and crop. - :param size: The requested output size in pixels, given as a - (width, height) tuple. - :param method: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :return: An image. - """ - - im_ratio = image.width / image.height - dest_ratio = size[0] / size[1] - - if im_ratio != dest_ratio: - if im_ratio > dest_ratio: - new_height = round(image.height / image.width * size[0]) - if new_height != size[1]: - size = (size[0], new_height) - else: - new_width = round(image.width / image.height * size[1]) - if new_width != size[0]: - size = (new_width, size[1]) - return image.resize(size, resample=method) - - -def pad(image, size, method=Image.Resampling.BICUBIC, color=None, centering=(0.5, 0.5)): - """ - Returns a resized and padded version of the image, expanded to fill the - requested aspect ratio and size. - - :param image: The image to resize and crop. - :param size: The requested output size in pixels, given as a - (width, height) tuple. - :param method: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :param color: The background color of the padded image. - :param centering: Control the position of the original image within the - padded version. - - (0.5, 0.5) will keep the image centered - (0, 0) will keep the image aligned to the top left - (1, 1) will keep the image aligned to the bottom - right - :return: An image. 
- """ - - resized = contain(image, size, method) - if resized.size == size: - out = resized - else: - out = Image.new(image.mode, size, color) - if resized.palette: - out.putpalette(resized.getpalette()) - if resized.width != size[0]: - x = round((size[0] - resized.width) * max(0, min(centering[0], 1))) - out.paste(resized, (x, 0)) - else: - y = round((size[1] - resized.height) * max(0, min(centering[1], 1))) - out.paste(resized, (0, y)) - return out - - -def crop(image, border=0): - """ - Remove border from image. The same amount of pixels are removed - from all four sides. This function works on all image modes. - - .. seealso:: :py:meth:`~PIL.Image.Image.crop` - - :param image: The image to crop. - :param border: The number of pixels to remove. - :return: An image. - """ - left, top, right, bottom = _border(border) - return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) - - -def scale(image, factor, resample=Image.Resampling.BICUBIC): - """ - Returns a rescaled image by a specific factor given in parameter. - A factor greater than 1 expands the image, between 0 and 1 contracts the - image. - - :param image: The image to rescale. - :param factor: The expansion factor, as a float. - :param resample: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :returns: An :py:class:`~PIL.Image.Image` object. - """ - if factor == 1: - return image.copy() - elif factor <= 0: - msg = "the factor must be greater than 0" - raise ValueError(msg) - else: - size = (round(factor * image.width), round(factor * image.height)) - return image.resize(size, resample) - - -def deform(image, deformer, resample=Image.Resampling.BILINEAR): - """ - Deform the image. - - :param image: The image to deform. - :param deformer: A deformer object. Any object that implements a - ``getmesh`` method can be used. - :param resample: An optional resampling filter. Same values possible as - in the PIL.Image.transform function. - :return: An image. - """ - return image.transform( - image.size, Image.Transform.MESH, deformer.getmesh(image), resample - ) - - -def equalize(image, mask=None): - """ - Equalize the image histogram. This function applies a non-linear - mapping to the input image, in order to create a uniform - distribution of grayscale values in the output image. - - :param image: The image to equalize. - :param mask: An optional mask. If given, only the pixels selected by - the mask are included in the analysis. - :return: An image. - """ - if image.mode == "P": - image = image.convert("RGB") - h = image.histogram(mask) - lut = [] - for b in range(0, len(h), 256): - histo = [_f for _f in h[b : b + 256] if _f] - if len(histo) <= 1: - lut.extend(list(range(256))) - else: - step = (functools.reduce(operator.add, histo) - histo[-1]) // 255 - if not step: - lut.extend(list(range(256))) - else: - n = step // 2 - for i in range(256): - lut.append(n // step) - n = n + h[i + b] - return _lut(image, lut) - - -def expand(image, border=0, fill=0): - """ - Add border to the image - - :param image: The image to expand. - :param border: Border width, in pixels. - :param fill: Pixel fill value (a color value). Default is 0 (black). - :return: An image. 
- """ - left, top, right, bottom = _border(border) - width = left + image.size[0] + right - height = top + image.size[1] + bottom - color = _color(fill, image.mode) - if image.palette: - palette = ImagePalette.ImagePalette(palette=image.getpalette()) - if isinstance(color, tuple): - color = palette.getcolor(color) - else: - palette = None - out = Image.new(image.mode, (width, height), color) - if palette: - out.putpalette(palette.palette) - out.paste(image, (left, top)) - return out - - -def fit(image, size, method=Image.Resampling.BICUBIC, bleed=0.0, centering=(0.5, 0.5)): - """ - Returns a resized and cropped version of the image, cropped to the - requested aspect ratio and size. - - This function was contributed by Kevin Cazabon. - - :param image: The image to resize and crop. - :param size: The requested output size in pixels, given as a - (width, height) tuple. - :param method: Resampling method to use. Default is - :py:attr:`~PIL.Image.Resampling.BICUBIC`. - See :ref:`concept-filters`. - :param bleed: Remove a border around the outside of the image from all - four edges. The value is a decimal percentage (use 0.01 for - one percent). The default value is 0 (no border). - Cannot be greater than or equal to 0.5. - :param centering: Control the cropping position. Use (0.5, 0.5) for - center cropping (e.g. if cropping the width, take 50% off - of the left side, and therefore 50% off the right side). - (0.0, 0.0) will crop from the top left corner (i.e. if - cropping the width, take all of the crop off of the right - side, and if cropping the height, take all of it off the - bottom). (1.0, 0.0) will crop from the bottom left - corner, etc. (i.e. if cropping the width, take all of the - crop off the left side, and if cropping the height take - none from the top, and therefore all off the bottom). - :return: An image. 
- """ - - # by Kevin Cazabon, Feb 17/2000 - # kevin@cazabon.com - # https://www.cazabon.com - - # ensure centering is mutable - centering = list(centering) - - if not 0.0 <= centering[0] <= 1.0: - centering[0] = 0.5 - if not 0.0 <= centering[1] <= 1.0: - centering[1] = 0.5 - - if not 0.0 <= bleed < 0.5: - bleed = 0.0 - - # calculate the area to use for resizing and cropping, subtracting - # the 'bleed' around the edges - - # number of pixels to trim off on Top and Bottom, Left and Right - bleed_pixels = (bleed * image.size[0], bleed * image.size[1]) - - live_size = ( - image.size[0] - bleed_pixels[0] * 2, - image.size[1] - bleed_pixels[1] * 2, - ) - - # calculate the aspect ratio of the live_size - live_size_ratio = live_size[0] / live_size[1] - - # calculate the aspect ratio of the output image - output_ratio = size[0] / size[1] - - # figure out if the sides or top/bottom will be cropped off - if live_size_ratio == output_ratio: - # live_size is already the needed ratio - crop_width = live_size[0] - crop_height = live_size[1] - elif live_size_ratio >= output_ratio: - # live_size is wider than what's needed, crop the sides - crop_width = output_ratio * live_size[1] - crop_height = live_size[1] - else: - # live_size is taller than what's needed, crop the top and bottom - crop_width = live_size[0] - crop_height = live_size[0] / output_ratio - - # make the crop - crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0] - crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1] - - crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height) - - # resize the image and return it - return image.resize(size, method, box=crop) - - -def flip(image): - """ - Flip the image vertically (top to bottom). - - :param image: The image to flip. - :return: An image. - """ - return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM) - - -def grayscale(image): - """ - Convert the image to grayscale. - - :param image: The image to convert. - :return: An image. - """ - return image.convert("L") - - -def invert(image): - """ - Invert (negate) the image. - - :param image: The image to invert. - :return: An image. - """ - lut = [] - for i in range(256): - lut.append(255 - i) - return image.point(lut) if image.mode == "1" else _lut(image, lut) - - -def mirror(image): - """ - Flip image horizontally (left to right). - - :param image: The image to mirror. - :return: An image. - """ - return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT) - - -def posterize(image, bits): - """ - Reduce the number of bits for each color channel. - - :param image: The image to posterize. - :param bits: The number of bits to keep for each channel (1-8). - :return: An image. - """ - lut = [] - mask = ~(2 ** (8 - bits) - 1) - for i in range(256): - lut.append(i & mask) - return _lut(image, lut) - - -def solarize(image, threshold=128): - """ - Invert all pixel values above a threshold. - - :param image: The image to solarize. - :param threshold: All pixels above this greyscale level are inverted. - :return: An image. - """ - lut = [] - for i in range(256): - if i < threshold: - lut.append(i) - else: - lut.append(255 - i) - return _lut(image, lut) - - -def exif_transpose(image, *, in_place=False): - """ - If an image has an EXIF Orientation tag, other than 1, transpose the image - accordingly, and remove the orientation data. - - :param image: The image to transpose. - :param in_place: Boolean. Keyword-only argument. - If ``True``, the original image is modified in-place, and ``None`` is returned. 
- If ``False`` (default), a new :py:class:`~PIL.Image.Image` object is returned - with the transposition applied. If there is no transposition, a copy of the - image will be returned. - """ - image_exif = image.getexif() - orientation = image_exif.get(ExifTags.Base.Orientation) - method = { - 2: Image.Transpose.FLIP_LEFT_RIGHT, - 3: Image.Transpose.ROTATE_180, - 4: Image.Transpose.FLIP_TOP_BOTTOM, - 5: Image.Transpose.TRANSPOSE, - 6: Image.Transpose.ROTATE_270, - 7: Image.Transpose.TRANSVERSE, - 8: Image.Transpose.ROTATE_90, - }.get(orientation) - if method is not None: - transposed_image = image.transpose(method) - if in_place: - image.im = transposed_image.im - image.pyaccess = None - image._size = transposed_image._size - exif_image = image if in_place else transposed_image - - exif = exif_image.getexif() - if ExifTags.Base.Orientation in exif: - del exif[ExifTags.Base.Orientation] - if "exif" in exif_image.info: - exif_image.info["exif"] = exif.tobytes() - elif "Raw profile type exif" in exif_image.info: - exif_image.info["Raw profile type exif"] = exif.tobytes().hex() - elif "XML:com.adobe.xmp" in exif_image.info: - for pattern in ( - r'tiff:Orientation="([0-9])"', - r"([0-9])", - ): - exif_image.info["XML:com.adobe.xmp"] = re.sub( - pattern, "", exif_image.info["XML:com.adobe.xmp"] - ) - if not in_place: - return transposed_image - elif not in_place: - return image.copy() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_fileio.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_fileio.py deleted file mode 100644 index 35e8e8af6c11dd6690a8382af6a23d1391fff9dc..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_fileio.py +++ /dev/null @@ -1,603 +0,0 @@ -from __future__ import annotations - -import os -import pathlib -import sys -from dataclasses import dataclass -from functools import partial -from os import PathLike -from typing import ( - IO, - TYPE_CHECKING, - Any, - AnyStr, - AsyncIterator, - Callable, - Generic, - Iterable, - Iterator, - Sequence, - cast, - overload, -) - -from .. import to_thread -from ..abc import AsyncResource - -if sys.version_info >= (3, 8): - from typing import Final -else: - from typing_extensions import Final - -if TYPE_CHECKING: - from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer -else: - ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object - - -class AsyncFile(AsyncResource, Generic[AnyStr]): - """ - An asynchronous file object. - - This class wraps a standard file object and provides async friendly versions of the following - blocking methods (where available on the original file object): - - * read - * read1 - * readline - * readlines - * readinto - * readinto1 - * write - * writelines - * truncate - * seek - * tell - * flush - - All other methods are directly passed through. - - This class supports the asynchronous context manager protocol which closes the underlying file - at the end of the context block. - - This class also supports asynchronous iteration:: - - async with await open_file(...) 
as f: - async for line in f: - print(line) - """ - - def __init__(self, fp: IO[AnyStr]) -> None: - self._fp: Any = fp - - def __getattr__(self, name: str) -> object: - return getattr(self._fp, name) - - @property - def wrapped(self) -> IO[AnyStr]: - """The wrapped file object.""" - return self._fp - - async def __aiter__(self) -> AsyncIterator[AnyStr]: - while True: - line = await self.readline() - if line: - yield line - else: - break - - async def aclose(self) -> None: - return await to_thread.run_sync(self._fp.close) - - async def read(self, size: int = -1) -> AnyStr: - return await to_thread.run_sync(self._fp.read, size) - - async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes: - return await to_thread.run_sync(self._fp.read1, size) - - async def readline(self) -> AnyStr: - return await to_thread.run_sync(self._fp.readline) - - async def readlines(self) -> list[AnyStr]: - return await to_thread.run_sync(self._fp.readlines) - - async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes: - return await to_thread.run_sync(self._fp.readinto, b) - - async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes: - return await to_thread.run_sync(self._fp.readinto1, b) - - @overload - async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: - ... - - @overload - async def write(self: AsyncFile[str], b: str) -> int: - ... - - async def write(self, b: ReadableBuffer | str) -> int: - return await to_thread.run_sync(self._fp.write, b) - - @overload - async def writelines( - self: AsyncFile[bytes], lines: Iterable[ReadableBuffer] - ) -> None: - ... - - @overload - async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: - ... - - async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None: - return await to_thread.run_sync(self._fp.writelines, lines) - - async def truncate(self, size: int | None = None) -> int: - return await to_thread.run_sync(self._fp.truncate, size) - - async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int: - return await to_thread.run_sync(self._fp.seek, offset, whence) - - async def tell(self) -> int: - return await to_thread.run_sync(self._fp.tell) - - async def flush(self) -> None: - return await to_thread.run_sync(self._fp.flush) - - -@overload -async def open_file( - file: str | PathLike[str] | int, - mode: OpenBinaryMode, - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - closefd: bool = ..., - opener: Callable[[str, int], int] | None = ..., -) -> AsyncFile[bytes]: - ... - - -@overload -async def open_file( - file: str | PathLike[str] | int, - mode: OpenTextMode = ..., - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - closefd: bool = ..., - opener: Callable[[str, int], int] | None = ..., -) -> AsyncFile[str]: - ... - - -async def open_file( - file: str | PathLike[str] | int, - mode: str = "r", - buffering: int = -1, - encoding: str | None = None, - errors: str | None = None, - newline: str | None = None, - closefd: bool = True, - opener: Callable[[str, int], int] | None = None, -) -> AsyncFile[Any]: - """ - Open a file asynchronously. - - The arguments are exactly the same as for the builtin :func:`open`. 
- - :return: an asynchronous file object - - """ - fp = await to_thread.run_sync( - open, file, mode, buffering, encoding, errors, newline, closefd, opener - ) - return AsyncFile(fp) - - -def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]: - """ - Wrap an existing file as an asynchronous file. - - :param file: an existing file-like object - :return: an asynchronous file object - - """ - return AsyncFile(file) - - -@dataclass(eq=False) -class _PathIterator(AsyncIterator["Path"]): - iterator: Iterator[PathLike[str]] - - async def __anext__(self) -> Path: - nextval = await to_thread.run_sync(next, self.iterator, None, cancellable=True) - if nextval is None: - raise StopAsyncIteration from None - - return Path(cast("PathLike[str]", nextval)) - - -class Path: - """ - An asynchronous version of :class:`pathlib.Path`. - - This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but - it is compatible with the :class:`os.PathLike` interface. - - It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the - deprecated :meth:`~pathlib.Path.link_to` method. - - Any methods that do disk I/O need to be awaited on. These methods are: - - * :meth:`~pathlib.Path.absolute` - * :meth:`~pathlib.Path.chmod` - * :meth:`~pathlib.Path.cwd` - * :meth:`~pathlib.Path.exists` - * :meth:`~pathlib.Path.expanduser` - * :meth:`~pathlib.Path.group` - * :meth:`~pathlib.Path.hardlink_to` - * :meth:`~pathlib.Path.home` - * :meth:`~pathlib.Path.is_block_device` - * :meth:`~pathlib.Path.is_char_device` - * :meth:`~pathlib.Path.is_dir` - * :meth:`~pathlib.Path.is_fifo` - * :meth:`~pathlib.Path.is_file` - * :meth:`~pathlib.Path.is_mount` - * :meth:`~pathlib.Path.lchmod` - * :meth:`~pathlib.Path.lstat` - * :meth:`~pathlib.Path.mkdir` - * :meth:`~pathlib.Path.open` - * :meth:`~pathlib.Path.owner` - * :meth:`~pathlib.Path.read_bytes` - * :meth:`~pathlib.Path.read_text` - * :meth:`~pathlib.Path.readlink` - * :meth:`~pathlib.Path.rename` - * :meth:`~pathlib.Path.replace` - * :meth:`~pathlib.Path.rmdir` - * :meth:`~pathlib.Path.samefile` - * :meth:`~pathlib.Path.stat` - * :meth:`~pathlib.Path.touch` - * :meth:`~pathlib.Path.unlink` - * :meth:`~pathlib.Path.write_bytes` - * :meth:`~pathlib.Path.write_text` - - Additionally, the following methods return an async iterator yielding :class:`~.Path` objects: - - * :meth:`~pathlib.Path.glob` - * :meth:`~pathlib.Path.iterdir` - * :meth:`~pathlib.Path.rglob` - """ - - __slots__ = "_path", "__weakref__" - - __weakref__: Any - - def __init__(self, *args: str | PathLike[str]) -> None: - self._path: Final[pathlib.Path] = pathlib.Path(*args) - - def __fspath__(self) -> str: - return self._path.__fspath__() - - def __str__(self) -> str: - return self._path.__str__() - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.as_posix()!r})" - - def __bytes__(self) -> bytes: - return self._path.__bytes__() - - def __hash__(self) -> int: - return self._path.__hash__() - - def __eq__(self, other: object) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__eq__(target) - - def __lt__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__lt__(target) - - def __le__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__le__(target) - - def __gt__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__gt__(target) - - def 
__ge__(self, other: Path) -> bool: - target = other._path if isinstance(other, Path) else other - return self._path.__ge__(target) - - def __truediv__(self, other: Any) -> Path: - return Path(self._path / other) - - def __rtruediv__(self, other: Any) -> Path: - return Path(other) / self - - @property - def parts(self) -> tuple[str, ...]: - return self._path.parts - - @property - def drive(self) -> str: - return self._path.drive - - @property - def root(self) -> str: - return self._path.root - - @property - def anchor(self) -> str: - return self._path.anchor - - @property - def parents(self) -> Sequence[Path]: - return tuple(Path(p) for p in self._path.parents) - - @property - def parent(self) -> Path: - return Path(self._path.parent) - - @property - def name(self) -> str: - return self._path.name - - @property - def suffix(self) -> str: - return self._path.suffix - - @property - def suffixes(self) -> list[str]: - return self._path.suffixes - - @property - def stem(self) -> str: - return self._path.stem - - async def absolute(self) -> Path: - path = await to_thread.run_sync(self._path.absolute) - return Path(path) - - def as_posix(self) -> str: - return self._path.as_posix() - - def as_uri(self) -> str: - return self._path.as_uri() - - def match(self, path_pattern: str) -> bool: - return self._path.match(path_pattern) - - def is_relative_to(self, *other: str | PathLike[str]) -> bool: - try: - self.relative_to(*other) - return True - except ValueError: - return False - - async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None: - func = partial(os.chmod, follow_symlinks=follow_symlinks) - return await to_thread.run_sync(func, self._path, mode) - - @classmethod - async def cwd(cls) -> Path: - path = await to_thread.run_sync(pathlib.Path.cwd) - return cls(path) - - async def exists(self) -> bool: - return await to_thread.run_sync(self._path.exists, cancellable=True) - - async def expanduser(self) -> Path: - return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True)) - - def glob(self, pattern: str) -> AsyncIterator[Path]: - gen = self._path.glob(pattern) - return _PathIterator(gen) - - async def group(self) -> str: - return await to_thread.run_sync(self._path.group, cancellable=True) - - async def hardlink_to(self, target: str | pathlib.Path | Path) -> None: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(os.link, target, self) - - @classmethod - async def home(cls) -> Path: - home_path = await to_thread.run_sync(pathlib.Path.home) - return cls(home_path) - - def is_absolute(self) -> bool: - return self._path.is_absolute() - - async def is_block_device(self) -> bool: - return await to_thread.run_sync(self._path.is_block_device, cancellable=True) - - async def is_char_device(self) -> bool: - return await to_thread.run_sync(self._path.is_char_device, cancellable=True) - - async def is_dir(self) -> bool: - return await to_thread.run_sync(self._path.is_dir, cancellable=True) - - async def is_fifo(self) -> bool: - return await to_thread.run_sync(self._path.is_fifo, cancellable=True) - - async def is_file(self) -> bool: - return await to_thread.run_sync(self._path.is_file, cancellable=True) - - async def is_mount(self) -> bool: - return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True) - - def is_reserved(self) -> bool: - return self._path.is_reserved() - - async def is_socket(self) -> bool: - return await to_thread.run_sync(self._path.is_socket, cancellable=True) - - async def is_symlink(self) -> bool: - 
return await to_thread.run_sync(self._path.is_symlink, cancellable=True) - - def iterdir(self) -> AsyncIterator[Path]: - gen = self._path.iterdir() - return _PathIterator(gen) - - def joinpath(self, *args: str | PathLike[str]) -> Path: - return Path(self._path.joinpath(*args)) - - async def lchmod(self, mode: int) -> None: - await to_thread.run_sync(self._path.lchmod, mode) - - async def lstat(self) -> os.stat_result: - return await to_thread.run_sync(self._path.lstat, cancellable=True) - - async def mkdir( - self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False - ) -> None: - await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok) - - @overload - async def open( - self, - mode: OpenBinaryMode, - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - ) -> AsyncFile[bytes]: - ... - - @overload - async def open( - self, - mode: OpenTextMode = ..., - buffering: int = ..., - encoding: str | None = ..., - errors: str | None = ..., - newline: str | None = ..., - ) -> AsyncFile[str]: - ... - - async def open( - self, - mode: str = "r", - buffering: int = -1, - encoding: str | None = None, - errors: str | None = None, - newline: str | None = None, - ) -> AsyncFile[Any]: - fp = await to_thread.run_sync( - self._path.open, mode, buffering, encoding, errors, newline - ) - return AsyncFile(fp) - - async def owner(self) -> str: - return await to_thread.run_sync(self._path.owner, cancellable=True) - - async def read_bytes(self) -> bytes: - return await to_thread.run_sync(self._path.read_bytes) - - async def read_text( - self, encoding: str | None = None, errors: str | None = None - ) -> str: - return await to_thread.run_sync(self._path.read_text, encoding, errors) - - def relative_to(self, *other: str | PathLike[str]) -> Path: - return Path(self._path.relative_to(*other)) - - async def readlink(self) -> Path: - target = await to_thread.run_sync(os.readlink, self._path) - return Path(cast(str, target)) - - async def rename(self, target: str | pathlib.PurePath | Path) -> Path: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(self._path.rename, target) - return Path(target) - - async def replace(self, target: str | pathlib.PurePath | Path) -> Path: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(self._path.replace, target) - return Path(target) - - async def resolve(self, strict: bool = False) -> Path: - func = partial(self._path.resolve, strict=strict) - return Path(await to_thread.run_sync(func, cancellable=True)) - - def rglob(self, pattern: str) -> AsyncIterator[Path]: - gen = self._path.rglob(pattern) - return _PathIterator(gen) - - async def rmdir(self) -> None: - await to_thread.run_sync(self._path.rmdir) - - async def samefile( - self, other_path: str | bytes | int | pathlib.Path | Path - ) -> bool: - if isinstance(other_path, Path): - other_path = other_path._path - - return await to_thread.run_sync( - self._path.samefile, other_path, cancellable=True - ) - - async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result: - func = partial(os.stat, follow_symlinks=follow_symlinks) - return await to_thread.run_sync(func, self._path, cancellable=True) - - async def symlink_to( - self, - target: str | pathlib.Path | Path, - target_is_directory: bool = False, - ) -> None: - if isinstance(target, Path): - target = target._path - - await to_thread.run_sync(self._path.symlink_to, target, target_is_directory) - - async def touch(self, mode: int 
= 0o666, exist_ok: bool = True) -> None: - await to_thread.run_sync(self._path.touch, mode, exist_ok) - - async def unlink(self, missing_ok: bool = False) -> None: - try: - await to_thread.run_sync(self._path.unlink) - except FileNotFoundError: - if not missing_ok: - raise - - def with_name(self, name: str) -> Path: - return Path(self._path.with_name(name)) - - def with_stem(self, stem: str) -> Path: - return Path(self._path.with_name(stem + self._path.suffix)) - - def with_suffix(self, suffix: str) -> Path: - return Path(self._path.with_suffix(suffix)) - - async def write_bytes(self, data: bytes) -> int: - return await to_thread.run_sync(self._path.write_bytes, data) - - async def write_text( - self, - data: str, - encoding: str | None = None, - errors: str | None = None, - newline: str | None = None, - ) -> int: - # Path.write_text() does not support the "newline" parameter before Python 3.10 - def sync_write_text() -> int: - with self._path.open( - "w", encoding=encoding, errors=errors, newline=newline - ) as fp: - return fp.write(data) - - return await to_thread.run_sync(sync_write_text) - - -PathLike.register(Path) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/exception_handlers.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/exception_handlers.py deleted file mode 100644 index 6c2ba7fedf9337260824b62987e65301e4fed129..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/exception_handlers.py +++ /dev/null @@ -1,34 +0,0 @@ -from fastapi.encoders import jsonable_encoder -from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError -from fastapi.utils import is_body_allowed_for_status_code -from fastapi.websockets import WebSocket -from starlette.exceptions import HTTPException -from starlette.requests import Request -from starlette.responses import JSONResponse, Response -from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, WS_1008_POLICY_VIOLATION - - -async def http_exception_handler(request: Request, exc: HTTPException) -> Response: - headers = getattr(exc, "headers", None) - if not is_body_allowed_for_status_code(exc.status_code): - return Response(status_code=exc.status_code, headers=headers) - return JSONResponse( - {"detail": exc.detail}, status_code=exc.status_code, headers=headers - ) - - -async def request_validation_exception_handler( - request: Request, exc: RequestValidationError -) -> JSONResponse: - return JSONResponse( - status_code=HTTP_422_UNPROCESSABLE_ENTITY, - content={"detail": jsonable_encoder(exc.errors())}, - ) - - -async def websocket_request_validation_exception_handler( - websocket: WebSocket, exc: WebSocketRequestValidationError -) -> None: - await websocket.close( - code=WS_1008_POLICY_VIOLATION, reason=jsonable_encoder(exc.errors()) - ) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/interpretation.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/interpretation.py deleted file mode 100644 index b261f4f637d2045f96e53da232faff3325cdc3e7..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/interpretation.py +++ /dev/null @@ -1,55 +0,0 @@ -"""gr.Interpretation() component""" - -from __future__ import annotations - -from typing import Any, Literal - -from gradio_client.documentation import document, 
set_documentation_group -from gradio_client.serializing import SimpleSerializable - -from gradio.components.base import Component, _Keywords - -set_documentation_group("component") - - -@document() -class Interpretation(Component, SimpleSerializable): - """ - Used to create an interpretation widget for a component. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {dict} with keys "original" and "interpretation". - - Guides: custom-interpretations-with-blocks - """ - - def __init__( - self, - component: Component, - *, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - component: Which component to show in the interpretation widget. - visible: Whether or not the interpretation is visible. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - Component.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - self.component = component - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - visible: bool | None = None, - ): - return { - "visible": visible, - "value": value, - "__type__": "update", - } diff --git a/spaces/johnson906/recipedia/CONTRIBUTING.md b/spaces/johnson906/recipedia/CONTRIBUTING.md deleted file mode 100644 index e51bb72243333f8a83e97c373d5b6840ae1f811f..0000000000000000000000000000000000000000 --- a/spaces/johnson906/recipedia/CONTRIBUTING.md +++ /dev/null @@ -1,36 +0,0 @@ -# Contributing -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests -We actively welcome your pull requests. - -1. Fork the repo and create your branch from `master`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Facebook's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - -## Coding Style -* 4 spaces for indentation rather than tabs -* 100 character line length -* PEP8 formatting - -## License -By contributing to this project, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. 
\ No newline at end of file diff --git a/spaces/jskalbg/ChatDev01/app.py b/spaces/jskalbg/ChatDev01/app.py deleted file mode 100644 index e696b4d98e12ef3aa3b92d7aedfb2d186e07fae9..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/app.py +++ /dev/null @@ -1,112 +0,0 @@ -import logging - -import requests -import os, shutil -from flask import Flask, send_from_directory, request, jsonify - -app = Flask(__name__, static_folder='online_log/static') - -app.logger.setLevel(logging.ERROR) - -log = logging.getLogger('werkzeug') -log.setLevel(logging.ERROR) - -messages = [] -import threading -from urllib.parse import parse_qs - -FILE_DIR = os.path.dirname(os.path.abspath(__file__)) -OUTPUT_DIR = os.path.join(FILE_DIR, "WareHouse") -def check_outdir(): - if not os.path.exists(OUTPUT_DIR): - os.mkdir(OUTPUT_DIR) - else: - shutil.rmtree(OUTPUT_DIR) - os.mkdir(OUTPUT_DIR) - - -def zip_all_files(): - shutil.make_archive("online_log/static/Outputs", "zip", OUTPUT_DIR) - - -def clear_all_files(): - shutil.rmtree(OUTPUT_DIR) - os.mkdir(OUTPUT_DIR) - - -def send_msg(role, text): - try: - data = {"role": role, "text": text} - response = requests.post("http://127.0.0.1:7860/send_message", json=data) - if response.status_code == 200: - print("Message sent successfully!") - else: - print("Failed to send message.") - except: - logging.info("flask app.py did not start for online log") - - -@app.route("/") -def index(): - return send_from_directory("online_log/static", "index.html") - -@app.route("/Outputs.zip") -def Outputs(): - return send_from_directory("online_log/static", "Outputs.zip") - -@app.route("/chain_visualizer") -def chain_visualizer(): - return send_from_directory("online_log/static", "chain_visualizer.html") - -@app.route("/replay") -def replay(): - return send_from_directory("online_log/static", "replay.html") - -@app.route("/download") -def download(): - return send_from_directory("online_log/static", "index.html") - -@app.route("/get_messages") -def get_messages(): - return jsonify(messages) - - -@app.route("/send_message", methods=["POST"]) -def send_message(): - data = request.get_json() - role = data.get("role") - text = data.get("text") - - avatarUrl = find_avatar_url(role) - - message = {"role": role, "text": text, "avatarUrl": avatarUrl} - messages.append(message) - return jsonify(message) - - -@app.post("/download") -def run(): - data = request.get_data().decode('utf-8') - query_params = parse_qs(data) - task = query_params['task'][0].replace("+", " ") - config = query_params['config'][0] - api_key = query_params['api_key'][0] - os.environ["OPENAI_API_KEY"] = api_key - check_outdir() - from run import runchatdev - # apper = threading.Thread(target=runchatdev, args=[task, config]) - # apper.setDaemon(True) - # apper.start() - runchatdev(task, config) - zip_all_files() - return send_from_directory("online_log/static", "index.html") - -def find_avatar_url(role): - role = role.replace(" ", "%20") - avatar_filename = f"avatars/{role}.png" - avatar_url = f"/static/{avatar_filename}" - return avatar_url - - -if __name__ == "__main__": - app.run(host="0.0.0.0", port=7860) \ No newline at end of file diff --git a/spaces/juancopi81/youtube-music-transcribe/README.md b/spaces/juancopi81/youtube-music-transcribe/README.md deleted file mode 100644 index c3a9492cf4f736d0e97ae4af2211a5e79627222b..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Youtube Music Transcribe -emoji: 🐨 -colorFrom: 
blue -colorTo: red -sdk: gradio -sdk_version: 3.11.0 -python_version: 3.7 -app_file: app.py -pinned: false -license: apache-2.0 ---- diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/contrib/moe/training_utils.py b/spaces/juancopi81/youtube-music-transcribe/t5x/contrib/moe/training_utils.py deleted file mode 100644 index 6a181c570e3cd17bec8bfebf905973a997f4932d..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/t5x/contrib/moe/training_utils.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2022 The T5X Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Extensions to Jax/Flax core functions for Mixture of Experts training. - -""" - -import dataclasses -import re -from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union - -import flax -import jax -import numpy as np -from t5x import train_state - -# Type Stubs -ParamTree = Any -PyTreeDef = Any -Gradients = Union[flax.core.FrozenDict, train_state.TrainState] - - -def match_fn(prefix: Optional[str]) -> Callable[[str], bool]: - """Creates a function returning true iff a string matches the prefix. - - Args: - prefix: Regex prefix to match. If none, then return match function will not - match any strings. - - Returns: - Prefix match function. - """ - if not prefix: - return lambda name: False - params_regex = re.compile(f'^{prefix}') - return lambda name: params_regex.match(name) is not None - - -def scale_sharded_grads(grads: Gradients, - sharded_match_fn: Optional[Callable[[str], bool]], - scale_factor: float) -> Gradients: - """Scales sharded grads, identified by sharded_match_fn, by scale_factor. - - Args: - grads: Parameter gradients. - sharded_match_fn: Filter function for distinguishing sharded parameters from - replicated parameters. - scale_factor: Amount by which to scale sharded parameter gradients. - - Returns: - Gradients matching input, expect with sharded parameter gradients rescaled. - """ - if sharded_match_fn: - names_and_grads, tree_def = _tree_flatten_with_names(grads) - scaled_grads = [ - grad * scale_factor if sharded_match_fn(name) else grad - for name, grad in names_and_grads - ] - return tree_def.unflatten(scaled_grads) - else: - return grads - - -def tree_map_with_names(f, param_tree, match_name_fn=lambda name: True): - """Like jax.tree_map but with a filter on the leaf path name. - - Args: - f: The function to be applied to each parameter in `param_tree`. - param_tree: The tree of parameters `f` should be applied to. - match_name_fn: This function is called with each tree leave's path name, - which has a path-like format ('a/b/c'), and decides whether `f` should be - applied to that leaf or the leaf should be kept as-is. - - Returns: - A tree identical in structure to `param_tree` but with the leaves the - result of calling `f` on them in the cases where `match_name_fn` returns - True for that leaf's path name. 
- """ - names_and_vals, tree_def = _tree_flatten_with_names(param_tree) - vals = [f(v) if match_name_fn(name) else v for name, v in names_and_vals] - return tree_def.unflatten(vals) - - -def _tree_flatten_with_names( - tree: ParamTree) -> Tuple[Sequence[Tuple[str, Any]], PyTreeDef]: - """Like jax.tree_flatten but also fetches leaf names. - - Specialized to parameter trees of the form {'key0': {'subkey0': Any}, ...}. - - Args: - tree: Tree of parameters to flatten. - - Returns: - - A list of leaf name and value pairs: [(name, value), ...]. - - A tree definition object representing the structure of the flattened tree. - """ - # PyTrees don't treat None values as leaves, so we explicitly declare them as - # such. - vals, tree_def = jax.tree_flatten(tree, is_leaf=lambda x: x is None) - - # 'Fake' token tree that is use to track jax internal tree traversal and - # adjust our custom tree traversal to be compatible with it. - tokens = range(len(vals)) - token_tree = tree_def.unflatten(tokens) - val_names, perm = zip(*_traverse_with_names(token_tree)) - inv_perm = np.argsort(perm) - - # Custom traversal should visit the same number of leaves. - if len(val_names) != len(vals): - raise ValueError(f'Pytree traversal detected {len(val_names)} names, ' - f'but {len(vals)} leafs.\nTreeDef is:\n{tree_def}') - - return [(val_names[i], v) for i, v in zip(inv_perm, vals)], tree_def - - -def _traverse_with_names( - param_tree: ParamTree) -> Iterable[Tuple[str, ParamTree]]: - """Traverses nested dicts/dataclasses and emits (leaf_name, leaf_val).""" - if dataclasses.is_dataclass(param_tree): - param_tree = flax.serialization.to_state_dict(param_tree) - if isinstance(param_tree, (dict, flax.core.FrozenDict)): - keys = sorted(param_tree.keys()) - for key in keys: - for path, v in _traverse_with_names(param_tree[key]): - yield (key + '/' + path).rstrip('/'), v - else: - yield '', param_tree diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/t5/t5_1_0/__init__.py b/spaces/juancopi81/youtube-music-transcribe/t5x/examples/t5/t5_1_0/__init__.py deleted file mode 100644 index da022c16301721a096a208e8bdb2a71bb87f9788..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/t5/t5_1_0/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The T5X Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This empty file is needed for loading the gin files in this directory. 
diff --git a/spaces/justest/gpt4free/g4f/Provider/Providers/Acytoo.py b/spaces/justest/gpt4free/g4f/Provider/Providers/Acytoo.py deleted file mode 100644 index 06083eb554c5d86371481431c268f63f69042615..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/Provider/Providers/Acytoo.py +++ /dev/null @@ -1,41 +0,0 @@ -import os, requests -from ...typing import sha256, Dict, get_type_hints -import json - -url = "https://chat.acytoo.com/api/completions" -model = ['gpt-3.5-turbo'] -supports_stream = False -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - base = '' - for message in messages: - base += '%s: %s\n' % (message['role'], message['content']) - base += 'assistant:' - - headers = { - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36" - } - data = { - "key": "", - "model": "gpt-3.5-turbo", - "messages": [ - { - "role": "user", - "content": base, - "createdAt": 1688518523500 - } - ], - "temperature": 1, - "password": "" - } - - response = requests.post(url, headers=headers, data=json.dumps(data)) - if response.status_code == 200: - yield response.text - else: - print(f"Error Occurred::{response.status_code}") - return None - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/kadirnar/yolor/yolor/utils/general.py b/spaces/kadirnar/yolor/yolor/utils/general.py deleted file mode 100644 index dac6f9e48ba633c8bc6a07ba3559c93cea640b92..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/yolor/yolor/utils/general.py +++ /dev/null @@ -1,449 +0,0 @@ -# General utils - -import glob -import logging -import math -import os -import platform -import random -import re -import subprocess -import time -from pathlib import Path - -import cv2 -import matplotlib -import numpy as np -import torch -import yaml - -from yolor.utils.google_utils import gsutil_getsize -from yolor.utils.metrics import fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f -from yolor.utils.torch_utils import init_torch_seeds - -# Set printoptions -torch.set_printoptions(linewidth=320, precision=5, profile='long') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -matplotlib.rc('font', **{'size': 11}) - -# Prevent OpenCV from multithreading (to use PyTorch DataLoader) -cv2.setNumThreads(0) - - -def set_logging(rank=-1): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if rank in [-1, 0] else logging.WARN) - - -def init_seeds(seed=0): - random.seed(seed) - np.random.seed(seed) - init_torch_seeds(seed) - - -def get_latest_run(search_dir='.'): - # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' - - -def check_git_status(): - # Suggest 'git pull' if repo is out of date - if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'): - s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8') - if 'Your branch is behind' in s: - print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n') - - -def check_img_size(img_size, s=32): - # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple - if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) - return new_size - - -def check_file(file): - # Search for file if not found - if os.path.isfile(file) or file == '': - return file - else: - files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), 'File Not Found: %s' % file # assert file was found - assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique - return files[0] # return file - - -def check_dataset(dict): - # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') - if val and len(val): - val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path - if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s): # download script - print('Downloading %s ...' % s) - if s.startswith('http') and s.endswith('.zip'): # URL - f = Path(s).name # filename - torch.hub.download_url_to_file(s, f) - r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip - else: # bash script - r = os.system(s) - print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value - else: - raise Exception('Dataset not found.') - - -def make_divisible(x, divisor): - # Returns x evenly divisible by divisor - return math.ceil(x / divisor) * divisor - - -def labels_to_class_weights(labels, nc=80): - # Get class weights (inverse frequency) from training labels - if labels[0] is None: # no labels loaded - return torch.Tensor() - - labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(np.int) # labels = [class xywh] - weights = np.bincount(classes, minlength=nc) # occurrences per class - - # Prepend gridpoint count (for uCE training) - # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image - # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - - weights[weights == 0] = 1 # replace empty bins with 1 - weights = 1 / weights # number of targets per class - weights /= weights.sum() # normalize - return torch.from_numpy(weights) - - -def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - # Produces image weights based on class mAPs - n = len(labels) - class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)]) - image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) - # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - return image_weights - - -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # 
https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ - # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') - # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') - # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco - # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] - return x - - -def xyxy2xywh(x): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - -def xywh2xyxy(x): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - return y - - -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - # Rescale coords (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords - - -def clip_coords(boxes, img_shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - - -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False, ECIoU=False, eps=1e-9): - # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - if GIoU or DIoU or CIoU or EIoU or ECIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU or EIoU or ECIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) - return iou - (rho2 / c2 + v * alpha) # CIoU - elif EIoU: # Efficient IoU https://arxiv.org/abs/2101.08158 - rho3 = (w1-w2) **2 - c3 = cw ** 2 + eps - rho4 = (h1-h2) **2 - c4 = ch ** 2 + eps - return iou - rho2 / c2 - rho3 / c3 - rho4 / c4 # EIoU - elif ECIoU: - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) - rho3 = (w1-w2) **2 - c3 = cw ** 2 + eps - rho4 = (h1-h2) **2 - c4 = ch ** 2 + eps - return iou - v * alpha - rho2 / c2 - rho3 / c3 - rho4 / c4 # ECIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) - - -def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False): - """Performs Non-Maximum Suppression (NMS) on inference results - - Returns: - detections with shape: nx6 (x1, y1, x2, y2, conf, cls) - """ - - nc = prediction[0].shape[1] - 5 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height - max_det = 300 # maximum number of detections per image - time_limit = 10.0 # seconds to quit after - redundant = True # require redundant detections - multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) - - t = time.time() - output = [torch.zeros(0, 6)] * prediction.shape[0] - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box (center x, center y, width, height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - - # Filter by class - if classes: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # If none remain process next image - n = x.shape[0] # number of boxes - if not n: - continue - - # Sort by confidence - # x = x[x[:, 4].argsort(descending=True)] - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torch.ops.torchvision.nms(boxes, scores, iou_thres) - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if (time.time() - t) > time_limit: - break # time limit exceeded - - return output - - -def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() - # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - x['optimizer'] = None - x['training_results'] = None - x['epoch'] = -1 - #x['model'].half() # to FP16 - #for p in x['model'].parameters(): - # p.requires_grad = False - torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize - print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb)) - - -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - # Print mutation 
results to evolve.txt (for use with train.py --evolve) - a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) - - if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local - - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness - - # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') - yaml.dump(hyp, f, sort_keys=False) - - if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload - - -def apply_classifier(x, model, img, im0): - # applies a second stage classifier to yolo outputs - im0 = [im0] if isinstance(im0, np.ndarray) else im0 - for i, d in enumerate(x): # per image - if d is not None and len(d): - d = d.clone() - - # Reshape and pad cutouts - b = xyxy2xywh(d[:, :4]) # boxes - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square - b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad - d[:, :4] = xywh2xyxy(b).long() - - # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) - - # Classes - pred_cls1 = d[:, 5].long() - ims = [] - for j, a in enumerate(d): # per item - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] - im = cv2.resize(cutout, (224, 224)) # BGR - # cv2.imwrite('test%i.jpg' % j, cutout) - - im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 - im /= 255.0 # 0 - 255 to 0.0 - 1.0 - ims.append(im) - - pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction - x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections - - return x - - -def increment_path(path, exist_ok=True, sep=''): - # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
- path = Path(path) # os-agnostic - if (path.exists() and exist_ok) or (not path.exists()): - return str(path) - else: - dirs = glob.glob(f"{path}{sep}*") # similar paths - matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - i = [int(m.groups()[0]) for m in matches if m] # indices - n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}" # update path diff --git a/spaces/kaguraaya/anime-remove-background/app.py b/spaces/kaguraaya/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/kaguraaya/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/mkgui/base/__init__.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/mkgui/base/__init__.py deleted file mode 100644 index 6905fa0da4ea5b5b30797d5dae08dd2a199318ad..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/mkgui/base/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ - -from .core import Opyrator diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/fregan/generator.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/fregan/generator.py deleted file mode 100644 index c0dd3a867c058c1201cd4ab65e6e2f2147aeb05d..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/vocoder/fregan/generator.py +++ /dev/null @@ -1,210 +0,0 @@ -import torch -import torch.nn.functional as F 
-import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from vocoder.fregan.utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5, 7)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[3], - padding=get_padding(kernel_size, dilation[3]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class FreGAN(torch.nn.Module): - def __init__(self, h, top_k=4): - super(FreGAN, self).__init__() - self.h = h - - self.num_kernels = len(h.resblock_kernel_sizes) - self.num_upsamples = len(h.upsample_rates) - self.upsample_rates = h.upsample_rates - self.up_kernels = h.upsample_kernel_sizes - self.cond_level = self.num_upsamples - top_k - self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)) - resblock = ResBlock1 if h.resblock == '1' else ResBlock2 - - self.ups = nn.ModuleList() - self.cond_up = nn.ModuleList() - self.res_output = nn.ModuleList() - upsample_ = 1 - kr = 80 - - for i, (u, k) in enumerate(zip(self.upsample_rates, self.up_kernels)): -# self.ups.append(weight_norm( - # ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)), - # k, u, padding=(k - u) // 2))) - self.ups.append(weight_norm(ConvTranspose1d(h.upsample_initial_channel//(2**i), - h.upsample_initial_channel//(2**(i+1)), - k, u, padding=(u//2 + u%2), 
output_padding=u%2))) - - if i > (self.num_upsamples - top_k): - self.res_output.append( - nn.Sequential( - nn.Upsample(scale_factor=u, mode='nearest'), - weight_norm(nn.Conv1d(h.upsample_initial_channel // (2 ** i), - h.upsample_initial_channel // (2 ** (i + 1)), 1)) - ) - ) - if i >= (self.num_upsamples - top_k): - self.cond_up.append( - weight_norm( - ConvTranspose1d(kr, h.upsample_initial_channel // (2 ** i), - self.up_kernels[i - 1], self.upsample_rates[i - 1], - padding=(self.upsample_rates[i-1]//2+self.upsample_rates[i-1]%2), output_padding=self.upsample_rates[i-1]%2)) - ) - kr = h.upsample_initial_channel // (2 ** i) - - upsample_ *= u - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h.upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond_up.apply(init_weights) - self.res_output.apply(init_weights) - - def forward(self, x): - mel = x - x = self.conv_pre(x) - output = None - for i in range(self.num_upsamples): - if i >= self.cond_level: - mel = self.cond_up[i - self.cond_level](mel) - x += mel - if i > self.cond_level: - if output is None: - output = self.res_output[i - self.cond_level - 1](x) - else: - output = self.res_output[i - self.cond_level - 1](output) - x = F.leaky_relu(x, LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - if output is not None: - output = output + x - - x = F.leaky_relu(output) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - for l in self.cond_up: - remove_weight_norm(l) - for l in self.res_output: - remove_weight_norm(l[1]) - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -''' - to run this, fix - from . import ResStack - into - from res_stack import ResStack -''' -if __name__ == '__main__': - ''' - torch.Size([3, 80, 10]) - torch.Size([3, 1, 2000]) - 4527362 - ''' - with open('config.json') as f: - data = f.read() - from utils import AttrDict - import json - json_config = json.loads(data) - h = AttrDict(json_config) - model = FreGAN(h) - - c = torch.randn(3, 80, 10) # (B, channels, T). 
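    # Shape arithmetic sketch (assuming, as the assert below implies, that the config's
    # upsample_rates multiply to 256): 10 mel frames * 256 = 2560 samples, i.e. (3, 80, 10) -> (3, 1, 2560).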
- print(c.shape) - - y = model(c) # (B, 1, T ** prod(upsample_scales) - print(y.shape) - assert y.shape == torch.Size([3, 1, 2560]) # For normal melgan torch.Size([3, 1, 2560]) - - pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - print(pytorch_total_params) \ No newline at end of file diff --git a/spaces/kmkarakaya/Auto_Review_Generation_in_Turkish/README.md b/spaces/kmkarakaya/Auto_Review_Generation_in_Turkish/README.md deleted file mode 100644 index 57868af616b1b6633efe89cedd65ae8aabf5a270..0000000000000000000000000000000000000000 --- a/spaces/kmkarakaya/Auto_Review_Generation_in_Turkish/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Auto Review Generation In Turkish -emoji: 🐠 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.1.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kokofixcomputers/chat-ui/src/lib/types/User.ts b/spaces/kokofixcomputers/chat-ui/src/lib/types/User.ts deleted file mode 100644 index 8c2a50cb567b5362cb794051055279fa4ee45c70..0000000000000000000000000000000000000000 --- a/spaces/kokofixcomputers/chat-ui/src/lib/types/User.ts +++ /dev/null @@ -1,14 +0,0 @@ -import type { ObjectId } from "mongodb"; -import type { Timestamps } from "./Timestamps"; - -export interface User extends Timestamps { - _id: ObjectId; - - username: string; - name: string; - avatarUrl: string; - hfUserId: string; - - // Session identifier, stored in the cookie - sessionId: string; -} diff --git a/spaces/kukuhtw/AutoGPT/tests/unit/test_browse_scrape_text.py b/spaces/kukuhtw/AutoGPT/tests/unit/test_browse_scrape_text.py deleted file mode 100644 index fea5ebfc05d466c7cb5711b5ac10e2ea102ddc45..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/AutoGPT/tests/unit/test_browse_scrape_text.py +++ /dev/null @@ -1,98 +0,0 @@ -# Generated by CodiumAI - -import requests - -from autogpt.commands.web_requests import scrape_text - -""" -Code Analysis - -Objective: -The objective of the "scrape_text" function is to scrape the text content from -a given URL and return it as a string, after removing any unwanted HTML tags and scripts. - -Inputs: -- url: a string representing the URL of the webpage to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. Check if the response contains an HTTP error. If it does, return an error message. -3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags. -4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup. -5. Split the text into lines and then into chunks, removing any extra whitespace. -6. Join the chunks into a single string with newline characters between them. -7. Return the cleaned text. - -Outputs: -- A string representing the cleaned text content of the webpage. - -Additional aspects: -- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively. -- The function removes script and style tags from the HTML to avoid including unwanted content in the text output. -- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. -""" - - -class TestScrapeText: - # Tests that scrape_text() returns the expected text when given a valid URL. 
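    # Illustrative sketch of the flow the module docstring above describes (a simplified
    # stand-in, not the actual autogpt.commands.web_requests.scrape_text implementation):
    #
    #   import requests
    #   from bs4 import BeautifulSoup
    #
    #   def scrape_text_sketch(url: str, user_agent: str = "AutoGPT") -> str:
    #       response = requests.get(url, headers={"User-Agent": user_agent})
    #       if response.status_code >= 400:
    #           return f"Error: HTTP {response.status_code} error"
    #       soup = BeautifulSoup(response.text, "html.parser")
    #       for element in soup(["script", "style"]):
    #           element.extract()                         # drop scripts and styles
    #       text = soup.get_text()
    #       lines = (line.strip() for line in text.splitlines())
    #       chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    #       return "\n".join(chunk for chunk in chunks if chunk)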
- def test_scrape_text_with_valid_url(self, mocker): - # Mock the requests.get() method to return a response with expected text - expected_text = "This is some sample text" - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = f"

    <html><body><div><p>{expected_text}</p></div></body></html>

    " - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL and assert that it returns the expected text - url = "http://www.example.com" - assert scrape_text(url) == expected_text - - # Tests that the function returns an error message when an invalid or unreachable url is provided. - def test_invalid_url(self, mocker): - # Mock the requests.get() method to raise an exception - mocker.patch( - "requests.Session.get", side_effect=requests.exceptions.RequestException - ) - - # Call the function with an invalid URL and assert that it returns an error message - url = "http://www.invalidurl.com" - error_message = scrape_text(url) - assert "Error:" in error_message - - # Tests that the function returns an empty string when the html page contains no text to be scraped. - def test_no_text(self, mocker): - # Mock the requests.get() method to return a response with no text - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "" - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL and assert that it returns an empty string - url = "http://www.example.com" - assert scrape_text(url) == "" - - # Tests that the function returns an error message when the response status code is an http error (>=400). - def test_http_error(self, mocker): - # Mock the requests.get() method to return a response with a 404 status code - mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) - - # Call the function with a URL - result = scrape_text("https://www.example.com") - - # Check that the function returns an error message - assert result == "Error: HTTP 404 error" - - # Tests that scrape_text() properly handles HTML tags. - def test_scrape_text_with_html_tags(self, mocker): - # Create a mock response object with HTML containing tags - html = "

    <html><body><p>This is <b>bold</b> text.</p></body></html>

    " - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = html - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL - result = scrape_text("https://www.example.com") - - # Check that the function properly handles HTML tags - assert result == "This is bold text." diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/merge/options.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/merge/options.py deleted file mode 100644 index 0c4cfb99884992f5d69cef4b365f26947c3f837b..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/merge/options.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod, Roozbeh Pournader - - -class Options(object): - class UnknownOptionError(Exception): - pass - - def __init__(self, **kwargs): - - self.verbose = False - self.timing = False - self.drop_tables = [] - - self.set(**kwargs) - - def set(self, **kwargs): - for k, v in kwargs.items(): - if not hasattr(self, k): - raise self.UnknownOptionError("Unknown option '%s'" % k) - setattr(self, k, v) - - def parse_opts(self, argv, ignore_unknown=[]): - ret = [] - opts = {} - for a in argv: - orig_a = a - if not a.startswith("--"): - ret.append(a) - continue - a = a[2:] - i = a.find("=") - op = "=" - if i == -1: - if a.startswith("no-"): - k = a[3:] - v = False - else: - k = a - v = True - else: - k = a[:i] - if k[-1] in "-+": - op = k[-1] + "=" # Ops is '-=' or '+=' now. - k = k[:-1] - v = a[i + 1 :] - ok = k - k = k.replace("-", "_") - if not hasattr(self, k): - if ignore_unknown is True or ok in ignore_unknown: - ret.append(orig_a) - continue - else: - raise self.UnknownOptionError("Unknown option '%s'" % a) - - ov = getattr(self, k) - if isinstance(ov, bool): - v = bool(v) - elif isinstance(ov, int): - v = int(v) - elif isinstance(ov, list): - vv = v.split(",") - if vv == [""]: - vv = [] - vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] - if op == "=": - v = vv - elif op == "+=": - v = ov - v.extend(vv) - elif op == "-=": - v = ov - for x in vv: - if x in v: - v.remove(x) - else: - assert 0 - - opts[k] = v - self.set(**opts) - - return ret diff --git a/spaces/lakshmi324/complaintBox/README.md b/spaces/lakshmi324/complaintBox/README.md deleted file mode 100644 index eef0b76492663ceb61b183c39b01e781fda5a3f2..0000000000000000000000000000000000000000 --- a/spaces/lakshmi324/complaintBox/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ComplaintBox -emoji: ⚡ -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_rrdbnet.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_rrdbnet.py deleted file mode 100644 index a35e5c017738eb40759245b6c6c80c1ba750db5e..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_rrdbnet.py +++ /dev/null @@ -1,103 +0,0 @@ -import functools -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.nn.init as init - - -def initialize_weights(net_l, scale=1): - if not isinstance(net_l, list): - net_l = [net_l] - for net in net_l: - for m in net.modules(): - if 
isinstance(m, nn.Conv2d): - init.kaiming_normal_(m.weight, a=0, mode='fan_in') - m.weight.data *= scale # for residual block - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - init.kaiming_normal_(m.weight, a=0, mode='fan_in') - m.weight.data *= scale - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - init.constant_(m.weight, 1) - init.constant_(m.bias.data, 0.0) - - -def make_layer(block, n_layers): - layers = [] - for _ in range(n_layers): - layers.append(block()) - return nn.Sequential(*layers) - - -class ResidualDenseBlock_5C(nn.Module): - def __init__(self, nf=64, gc=32, bias=True): - super(ResidualDenseBlock_5C, self).__init__() - # gc: growth channel, i.e. intermediate channels - self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias) - self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias) - self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias) - self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias) - self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - # initialization - initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) - - def forward(self, x): - x1 = self.lrelu(self.conv1(x)) - x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) - x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) - x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) - x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) - return x5 * 0.2 + x - - -class RRDB(nn.Module): - '''Residual in Residual Dense Block''' - - def __init__(self, nf, gc=32): - super(RRDB, self).__init__() - self.RDB1 = ResidualDenseBlock_5C(nf, gc) - self.RDB2 = ResidualDenseBlock_5C(nf, gc) - self.RDB3 = ResidualDenseBlock_5C(nf, gc) - - def forward(self, x): - out = self.RDB1(x) - out = self.RDB2(out) - out = self.RDB3(out) - return out * 0.2 + x - - -class RRDBNet(nn.Module): - def __init__(self, in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=4): - super(RRDBNet, self).__init__() - RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc) - self.sf = sf - print([in_nc, out_nc, nf, nb, gc, sf]) - - self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) - self.RRDB_trunk = make_layer(RRDB_block_f, nb) - self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - #### upsampling - self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - if self.sf==4: - self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - def forward(self, x): - fea = self.conv_first(x) - trunk = self.trunk_conv(self.RRDB_trunk(fea)) - fea = fea + trunk - - fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest'))) - if self.sf == 4: - fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest'))) - out = self.conv_last(self.lrelu(self.HRconv(fea))) - - return out diff --git a/spaces/language-tools/language-demo/app.py b/spaces/language-tools/language-demo/app.py deleted file mode 100644 index db8c18b033eaa56668f10581fb73c430c3ac3c43..0000000000000000000000000000000000000000 --- a/spaces/language-tools/language-demo/app.py +++ /dev/null @@ -1,114 +0,0 @@ -import os - -import fasttext -import gradio as gr -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline -import torch - -title = "Community Tab Language Detection & Translation" -description = """ -When 
comments are created in the community tab, detect the language of the content. -Then, if the detected language is different from the user's language, display an option to translate it. -""" - -model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M") -tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M") -device = 0 if torch.cuda.is_available() else -1 -print(f"Is CUDA available: {torch.cuda.is_available()}") - -language_code_map = { - "English": "eng_Latn", - "French": "fra_Latn", - "German": "deu_Latn", - "Spanish": "spa_Latn", - "Korean": "kor_Hang", - "Japanese": "jpn_Jpan" -} - - -def identify_language(text): - model_file = "lid218e.bin" - model_full_path = os.path.join(os.path.dirname(__file__), model_file) - model = fasttext.load_model(model_full_path) - predictions = model.predict(text, k=1) # e.g., (('__label__eng_Latn',), array([0.81148803])) - - CHAR_TO_STRIP = 9 # To strip away '__label__' from language code - language_code = predictions[0][0][CHAR_TO_STRIP:] - - return language_code - - -def display(user_lang, text): - user_lang_code = language_code_map[user_lang] - language_code = identify_language(text) - - translate_button_visibility = language_code != user_lang_code - - detected_language_text = f""" - Detected Language: {language_code}\n - User Content Language: {user_lang_code}\n - {"" if translate_button_visibility else "[NOT TRANSLATABLE] Detected Language and Content Language are the same"} - """ - - return text, gr.update(value="", placeholder="Leave a comment"), gr.update(value=detected_language_text), gr.update(visible=translate_button_visibility, variant="primary") - - -def translate(text, src_lang, tgt_lang): - CHAR_TO_STRIP = 22 # To strip away 'Detected Language: ' from language code - LANGUAGE_CODE_LENGTH = 8 # To strip away 'Detected Language: ' from language code - src_lang_code = src_lang[CHAR_TO_STRIP:CHAR_TO_STRIP + LANGUAGE_CODE_LENGTH] - tgt_lang_code = language_code_map[tgt_lang] - - translation_pipeline = pipeline( - "translation", model=model, tokenizer=tokenizer, src_lang=src_lang_code, tgt_lang=tgt_lang_code, device=device) - result = translation_pipeline(text) - return result[0]['translation_text'] - -with gr.Blocks() as demo: - gr.HTML( - f""" -
-    <div>
-        <h1>{title}</h1>
-        <p>{description}</p>
-    </div>
    - """ - ) - - user_langugage_radio = gr.Radio(["English", "Spanish", "Korean", "French", "German", "Japanese"], - value="English", label="User Content Language") - - comment_input_textbox = gr.Textbox( - lines=3, label="Write a Comment", placeholder="Leave a comment") - comment_out_textbox = gr.Textbox(lines=3, label="Comment") - detected_lang_markdown = gr.Markdown("", elem_id="detect-lang-md") - - comment_btn = gr.Button("Comment") - - translate_btn = gr.Button("Translate", visible=False) - detected_language_value = gr.Textbox("", visible=False) - - - comment_btn.click(display, - inputs=[user_langugage_radio, comment_input_textbox], - outputs=[ - comment_out_textbox, - comment_input_textbox, - detected_lang_markdown, - translate_btn - ]) - - translate_btn.click(translate, - inputs=[ - comment_out_textbox, - detected_lang_markdown, - user_langugage_radio - ], - outputs=comment_out_textbox) - -demo.launch() diff --git a/spaces/larryyin/experian-bot/README.md b/spaces/larryyin/experian-bot/README.md deleted file mode 100644 index ab884ee910cbd61432fa2eb113d4cb190cdcb914..0000000000000000000000000000000000000000 --- a/spaces/larryyin/experian-bot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Experian Bot -emoji: ⚡ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/static/tailwind.config.js b/spaces/latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5/static/tailwind.config.js deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/lcipolina/Print_Gallery/glide_text2im/clip/encoders.py b/spaces/lcipolina/Print_Gallery/glide_text2im/clip/encoders.py deleted file mode 100644 index ee72773c2c891d2dda6d02933e88599b5330b052..0000000000000000000000000000000000000000 --- a/spaces/lcipolina/Print_Gallery/glide_text2im/clip/encoders.py +++ /dev/null @@ -1,497 +0,0 @@ -import math -from collections import OrderedDict -from typing import List, Optional, Tuple, cast - -import attr -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .attention import ( - AttentionInfo, - DenseAttentionMask, - DenseCausalAttentionMask, - make_full_layout, - to_attention_info, -) -from .utils import Affine, LayerNorm, zero_key_bias_grad - -# Constants used in the original CLIP implementation. 
-image_channel_means = [122.77093945, 116.74601272, 104.09373519] -image_channel_stds = [68.50053285, 66.63215831, 70.32316309] - - -@attr.s(eq=False, repr=False) -class TextEmbedding(nn.Module): - n_vocab: int = attr.ib() - n_context: int = attr.ib() - n_state: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - w_voc = torch.empty((self.n_vocab, self.n_state), dtype=torch.float32, device=self.device) - w_pos = torch.empty((self.n_context, self.n_state), dtype=torch.float32, device=self.device) - - with torch.no_grad(): - w_voc.normal_(std=0.02) - w_pos.normal_(std=0.01) - - self.w_voc = nn.Parameter(w_voc) - self.w_pos = nn.Parameter(w_pos) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - if len(x.shape) != 2: - raise ValueError() - - return F.embedding(x, self.w_voc) + self.w_pos[None, :, :] - - -@attr.s(eq=False, repr=False) -class ImageEmbedding(nn.Module): - image_size: int = attr.ib() - patch_size: int = attr.ib() - n_state: int = attr.ib() - n_timestep: int = attr.ib(default=0) - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - if self.image_size % self.patch_size != 0: - raise ValueError() - - n_patch = self.image_size // self.patch_size - patch_proj = torch.empty( - (self.n_state, 3) + 2 * (self.patch_size,), dtype=torch.float32, device=self.device - ) - w_pos = torch.empty( - (1 + n_patch ** 2, self.n_state), dtype=torch.float32, device=self.device - ) - - with torch.no_grad(): - if self.n_timestep == 0: - pred_state = torch.empty((self.n_state,), dtype=torch.float32, device=self.device) - pred_state.normal_(std=1 / np.sqrt(self.n_state)) - self.pred_state = nn.Parameter(pred_state) - else: - w_t = torch.empty( - (self.n_timestep, self.n_state), dtype=torch.float32, device=self.device - ) - w_t.normal_(std=1 / np.sqrt(self.n_state)) - self.w_t = nn.Parameter(w_t) - - patch_proj.normal_(std=np.sqrt(2 / (self.n_state * self.patch_size ** 2))) - w_pos.normal_(std=1 / np.sqrt(self.n_state)) - - self.patch_proj = nn.Parameter(patch_proj) - self.w_pos = nn.Parameter(w_pos) - - self.channel_means = torch.tensor( - image_channel_means, dtype=torch.float32, device=self.device - )[None, :, None, None] - self.channel_stds = torch.tensor( - image_channel_stds, dtype=torch.float32, device=self.device - )[None, :, None, None] - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - - def forward(self, x: torch.Tensor, t: Optional[torch.Tensor] = None) -> torch.Tensor: - if len(x.shape) != 4: - raise ValueError("input should be 4d") - if x.shape[1] != 3: - raise ValueError("input should have 3 channels") - if not (x.shape[2] == self.image_size and x.shape[3] == self.image_size): - raise ValueError(f"input is not {self.image_size} x {self.image_size}") - - if (self.n_timestep == 0 and t is not None) or (self.n_timestep != 0 and t is None): - raise ValueError() - if self.n_timestep != 0: - assert t is not None - if len(t.shape) != 1: - raise ValueError() - if t.shape[0] != x.shape[0]: - raise ValueError() - - x = (x - self.channel_means) / self.channel_stds - x = F.conv2d(x, self.patch_proj, stride=self.patch_size) - x = x.reshape(x.shape[0], self.n_state, (self.image_size // self.patch_size) ** 2).permute( - 0, 2, 1 - ) - - sot = ( - self.pred_state[None, None].expand(x.shape[0], -1, -1) - if self.n_timestep == 0 - else F.embedding(cast(torch.Tensor, t), self.w_t)[:, None] - ) - x = torch.cat((sot, x), 
dim=1) + self.w_pos[None] - return self.ln(x) - - -@attr.s(eq=False, repr=False) -class AttentionResblock(nn.Module): - n_state: int = attr.ib() - n_resblocks: int = attr.ib() - attn_fn: AttentionInfo = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.n_head_state = self.n_state // self.attn_fn.n_heads - self.qk_scale = 1 / np.sqrt(self.n_head_state) - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f_q = Affine( - self.n_state, - self.n_state, - std=1 / math.sqrt(self.n_state), - use_bias=True, - bias_filter_fn=zero_key_bias_grad, - device=self.device, - ) - self.f_k = Affine( - self.n_state, - self.n_state, - std=1 / math.sqrt(self.n_state), - use_bias=False, - bias_filter_fn=zero_key_bias_grad, - device=self.device, - ) - self.f_v = Affine( - self.n_state, - self.n_state, - std=1 / math.sqrt(self.n_state), - use_bias=True, - bias_filter_fn=zero_key_bias_grad, - device=self.device, - ) - self.f_c = Affine( - self.n_state, - self.n_state, - use_bias=True, - std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2), - device=self.device, - ) # XXX - - def forward(self, m: torch.Tensor) -> torch.Tensor: - n_context = m.shape[1] - n_query_pad = self.attn_fn.ctx_blks_q * self.attn_fn.block_size - n_context - n_key_pad = self.attn_fn.ctx_blks_k * self.attn_fn.block_size - n_context - assert n_query_pad >= 0 - assert n_key_pad >= 0 - - r = m - r = self.ln(r) - q, k, v = self.f_q(r), self.f_k(r), self.f_v(r) - - if n_query_pad != 0: - q = F.pad(q, (0, 0, 0, n_query_pad)) - - if n_key_pad != 0: - k = F.pad(k, (0, 0, 0, n_key_pad)) - v = F.pad(v, (0, 0, 0, n_key_pad)) - - q = q.view([q.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3)) - k = k.view([k.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3)) - v = v.view([v.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3)) - w = torch.einsum( - "bhcd,bhkd->bhck", q * math.sqrt(self.qk_scale), k * math.sqrt(self.qk_scale) - ) - - if hasattr(self.attn_fn, "pytorch_attn_bias"): - bias = self.attn_fn.pytorch_attn_bias - assert len(bias.shape) in {2, 3} - - if len(bias.shape) == 2: - w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None, None], dim=-1) - elif len(bias.shape) == 3: - w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None], dim=-1) - else: - w = torch.softmax(w, dim=-1) - - r = torch.einsum("bhck,bhkd->bhcd", w, v) - r = r.permute((0, 2, 1, 3)).reshape((r.shape[0], -1, self.n_state)) - - if n_query_pad != 0: - r = r[:, :-n_query_pad] - - assert r.shape[1] == n_context - - r = self.f_c(r) - return m + r - - -@attr.s(eq=False, repr=False) -class FullyConnectedResblock(nn.Module): - """ - Not imported from other files because we retain Alec's original inits. 
- """ - - n_state: int = attr.ib() - n_resblocks: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f_1 = Affine( - self.n_state, - 4 * self.n_state, - use_bias=True, - std=np.sqrt(2 / (4 * self.n_state)), - device=self.device, - ) - self.f_2 = Affine( - 4 * self.n_state, - self.n_state, - use_bias=True, - std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2), - device=self.device, - ) # XXX - - def forward(self, m: torch.Tensor) -> torch.Tensor: - r = m - r = self.ln(r) - - r = self.f_2(F.gelu(self.f_1(r))) - return m + r - - -@attr.s(eq=False, repr=False) -class TransformerBlock(nn.Module): - n_state: int = attr.ib() - n_resblocks: int = attr.ib() - attn_fn: AttentionInfo = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.f_attn = AttentionResblock( - self.n_state, - self.n_resblocks, - self.attn_fn, - self.device, - ) - self.f_mlp = FullyConnectedResblock(self.n_state, self.n_resblocks, self.device) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.f_mlp(self.f_attn(x)) - - -@attr.s(eq=False, repr=False) -class TextFeatureExtractor(nn.Module): - n_state: int = attr.ib() - n_embd: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device) - - def forward( - self, text: torch.Tensor, text_len: torch.Tensor, return_probe_features: bool = False - ) -> torch.Tensor: - if len(text.shape) != 3: - raise ValueError("expected text to be 3d") - if len(text_len.shape) != 1: - raise ValueError("expected text length to be 1d") - if text.shape[0] != text_len.shape[0]: - raise ValueError("text and text_len have inconsistent batch dimensions") - - index = (text_len - 1)[:, None, None].expand(-1, 1, text.shape[2]) - x = torch.gather(text, dim=1, index=index) - assert list(x.shape) == [text.shape[0], 1, text.shape[2]] - - if return_probe_features: - return x[:, 0] - - x = self.ln(x) - return self.f(x[:, 0]) - - -@attr.s(eq=False, repr=False) -class ImageFeatureExtractor(nn.Module): - n_state: int = attr.ib() - n_embd: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device) - - def forward(self, x: torch.Tensor, return_probe_features: bool = False) -> torch.Tensor: - if return_probe_features: - return x[:, 0] - - x = self.ln(x[:, :1]) - return self.f(x[:, 0]) - - -@attr.s(eq=False, repr=False) -class TextEncoder(nn.Module): - n_bpe_vocab: int = attr.ib() - max_text_len: int = attr.ib() - n_embd: int = attr.ib() - n_head: int = attr.ib() - n_xf_blocks: int = attr.ib() - n_head_state: int = attr.ib(default=64) - device: torch.device = attr.ib(default=torch.device("cuda")) - block_size: int = attr.ib(init=False, default=32) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.n_state = self.n_head * self.n_head_state - n_rounded_context = self.block_size * int(math.ceil(self.max_text_len / self.block_size)) - n_pad = n_rounded_context - 
self.max_text_len - - args = ( - n_rounded_context, - n_rounded_context, - self.block_size, - self.n_head, - False, - n_pad, - n_pad, - ) - mask = DenseCausalAttentionMask(*args) - attn_fn = to_attention_info(mask) - - m = 1 - make_full_layout(mask).astype(np.float32) - m[m == 1] = -1e10 - attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device) - - blocks: List[Tuple[str, nn.Module]] = [ - ( - "input", - TextEmbedding( - self.n_bpe_vocab, self.max_text_len, self.n_state, device=self.device - ), - ) - ] - - for i in range(self.n_xf_blocks): - blocks.append( - ( - f"block_{i}", - TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device), - ) - ) - - blocks.append( - ("output", TextFeatureExtractor(self.n_state, self.n_embd, device=self.device)) - ) - - self.blocks = nn.ModuleDict(OrderedDict(blocks)) - - def forward( - self, - text: torch.Tensor, - text_len: torch.Tensor, - return_probe_features: bool = False, - ) -> torch.Tensor: - - n_batch = text.shape[0] - h = self.blocks["input"](text) - - for i in range(self.n_xf_blocks): - h = self.blocks[f"block_{i}"](h) - - h = self.blocks["output"](h, text_len, return_probe_features=return_probe_features) - - assert list(h.shape) == [ - n_batch, - self.n_embd if not return_probe_features else self.n_state, - ] - return h - - -@attr.s(eq=False, repr=False) -class ImageEncoder(nn.Module): - image_size: int = attr.ib() - patch_size: int = attr.ib() - n_embd: int = attr.ib() - n_head: int = attr.ib() - n_xf_blocks: int = attr.ib() - n_head_state: int = attr.ib(default=64) - n_timestep: int = attr.ib(default=0) - device: torch.device = attr.ib(default=torch.device("cuda")) - block_size: int = attr.ib(init=False, default=32) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.n_state = self.n_head * self.n_head_state - self.n_context = 1 + (self.image_size // self.patch_size) ** 2 - n_rounded_context = self.block_size * int(math.ceil(self.n_context / self.block_size)) - n_pad = n_rounded_context - self.n_context - - args = ( - n_rounded_context, - n_rounded_context, - self.block_size, - self.n_head, - False, - n_pad, - n_pad, - ) - mask = DenseAttentionMask(*args) - attn_fn = to_attention_info(mask) - - m = 1 - make_full_layout(mask).astype(np.float32) - m[m == 1] = -1e10 - attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device) - - blocks: List[Tuple[str, nn.Module]] = [ - ( - "input", - ImageEmbedding( - self.image_size, - self.patch_size, - self.n_state, - n_timestep=self.n_timestep, - device=self.device, - ), - ) - ] - - for i in range(self.n_xf_blocks): - blocks.append( - ( - f"block_{i}", - TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device), - ) - ) - - blocks.append(("output", ImageFeatureExtractor(self.n_state, self.n_embd, self.device))) - - self.blocks = nn.ModuleDict(OrderedDict(blocks)) - - def forward( - self, - image: torch.Tensor, - timesteps: Optional[torch.Tensor] = None, - return_probe_features: bool = False, - ) -> torch.Tensor: - n_batch = image.shape[0] - h = self.blocks["input"](image, t=timesteps) - - for i in range(self.n_xf_blocks): - h = self.blocks[f"block_{i}"](h) - - h = self.blocks["output"](h, return_probe_features=return_probe_features) - - assert list(h.shape) == [ - n_batch, - self.n_embd if not return_probe_features else self.n_state, - ] - - return h diff --git a/spaces/lewisliuX123/wechatglm_demo/scripts/start.sh b/spaces/lewisliuX123/wechatglm_demo/scripts/start.sh deleted file mode 100644 index 
ac92f8851f6925399f2a4482e271a10ff2accbd5..0000000000000000000000000000000000000000 --- a/spaces/lewisliuX123/wechatglm_demo/scripts/start.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -#后台运行Chat_on_webchat执行脚本 - -cd `dirname $0`/.. -export BASE_DIR=`pwd` -echo $BASE_DIR - -# check the nohup.out log output file -if [ ! -f "${BASE_DIR}/nohup.out" ]; then - touch "${BASE_DIR}/nohup.out" -echo "create file ${BASE_DIR}/nohup.out" -fi - -nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out" - -echo "Chat_on_webchat is starting,you can check the ${BASE_DIR}/nohup.out" diff --git a/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/main.py b/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/main.py deleted file mode 100644 index 873db8e1ff392b80472cdf6faf6c153b16487e28..0000000000000000000000000000000000000000 --- a/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/main.py +++ /dev/null @@ -1,30 +0,0 @@ -from llama_cpp.server.app import create_app, Settings -from fastapi.responses import HTMLResponse -from fastapi.middleware.gzip import GZipMiddleware -import os - -app = create_app( - Settings( - n_threads=2, # set to number of cpu cores - model="model/gguf-model.bin", - embedding=True - ) -) - -app.add_middleware(GZipMiddleware, minimum_size=1000) - -# Read the content of index.html once and store it in memory -with open("index.html", "r") as f: - content = f.read() - - -@app.get("/", response_class=HTMLResponse) -async def read_items(): - return content - -if __name__ == "__main__": - import uvicorn - uvicorn.run(app, - host=os.environ["HOST"], - port=int(os.environ["PORT"]) - ) diff --git a/spaces/lindeberg/whisper-webui/src/vad.py b/spaces/lindeberg/whisper-webui/src/vad.py deleted file mode 100644 index 318e467ac230830fc4a7dd7c5295d23d1d04a31f..0000000000000000000000000000000000000000 --- a/spaces/lindeberg/whisper-webui/src/vad.py +++ /dev/null @@ -1,537 +0,0 @@ -from abc import ABC, abstractmethod -from collections import Counter, deque -import time - -from typing import Any, Deque, Iterator, List, Dict - -from pprint import pprint -from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache - -from src.segments import merge_timestamps -from src.whisperContainer import WhisperCallback - -# Workaround for https://github.com/tensorflow/tensorflow/issues/48797 -try: - import tensorflow as tf -except ModuleNotFoundError: - # Error handling - pass - -import torch - -import ffmpeg -import numpy as np - -from src.utils import format_timestamp -from enum import Enum - -class NonSpeechStrategy(Enum): - """ - Ignore non-speech frames segments. - """ - SKIP = 1 - """ - Just treat non-speech segments as speech. - """ - CREATE_SEGMENT = 2 - """ - Expand speech segments into subsequent non-speech segments. 
- """ - EXPAND_SEGMENT = 3 - -# Defaults for Silero -SPEECH_TRESHOLD = 0.3 - -# Minimum size of segments to process -MIN_SEGMENT_DURATION = 1 - -# The maximum time for texts from old segments to be used in the next segment -MAX_PROMPT_WINDOW = 0 # seconds (0 = disabled) -PROMPT_NO_SPEECH_PROB = 0.1 # Do not pass the text from segments with a no speech probability higher than this - -VAD_MAX_PROCESSING_CHUNK = 60 * 60 # 60 minutes of audio - -class TranscriptionConfig(ABC): - def __init__(self, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP, - segment_padding_left: float = None, segment_padding_right = None, max_silent_period: float = None, - max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index = -1): - self.non_speech_strategy = non_speech_strategy - self.segment_padding_left = segment_padding_left - self.segment_padding_right = segment_padding_right - self.max_silent_period = max_silent_period - self.max_merge_size = max_merge_size - self.max_prompt_window = max_prompt_window - self.initial_segment_index = initial_segment_index - -class PeriodicTranscriptionConfig(TranscriptionConfig): - def __init__(self, periodic_duration: float, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP, - segment_padding_left: float = None, segment_padding_right = None, max_silent_period: float = None, - max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index = -1): - super().__init__(non_speech_strategy, segment_padding_left, segment_padding_right, max_silent_period, max_merge_size, max_prompt_window, initial_segment_index) - self.periodic_duration = periodic_duration - -class AbstractTranscription(ABC): - def __init__(self, sampling_rate: int = 16000): - self.sampling_rate = sampling_rate - - def get_audio_segment(self, str, start_time: str = None, duration: str = None): - return load_audio(str, self.sampling_rate, start_time, duration) - - def is_transcribe_timestamps_fast(self): - """ - Determine if get_transcribe_timestamps is fast enough to not need parallelization. - """ - return False - - @abstractmethod - def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float): - """ - Get the start and end timestamps of the sections that should be transcribed by this VAD method. - - Parameters - ---------- - audio: str - The audio file. - config: TranscriptionConfig - The transcription configuration. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. - """ - return - - def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: TranscriptionConfig, total_duration: float): - """ - Get the start and end timestamps of the sections that should be transcribed by this VAD method, - after merging the given segments using the specified configuration. - - Parameters - ---------- - audio: str - The audio file. - config: TranscriptionConfig - The transcription configuration. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. 
- """ - merged = merge_timestamps(timestamps, config.max_silent_period, config.max_merge_size, - config.segment_padding_left, config.segment_padding_right) - - if config.non_speech_strategy != NonSpeechStrategy.SKIP: - # Expand segments to include the gaps between them - if (config.non_speech_strategy == NonSpeechStrategy.CREATE_SEGMENT): - # When we have a prompt window, we create speech segments betwen each segment if we exceed the merge size - merged = self.fill_gaps(merged, total_duration=total_duration, max_expand_size=config.max_merge_size) - elif config.non_speech_strategy == NonSpeechStrategy.EXPAND_SEGMENT: - # With no prompt window, it is better to just expand the segments (this effectively passes the prompt to the next segment) - merged = self.expand_gaps(merged, total_duration=total_duration) - else: - raise Exception("Unknown non-speech strategy: " + str(config.non_speech_strategy)) - - print("Transcribing non-speech:") - pprint(merged) - return merged - - def transcribe(self, audio: str, whisperCallable: WhisperCallback, config: TranscriptionConfig): - """ - Transcribe the given audo file. - - Parameters - ---------- - audio: str - The audio file. - whisperCallable: WhisperCallback - A callback object to call to transcribe each segment. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. - """ - - max_audio_duration = get_audio_duration(audio) - timestamp_segments = self.get_transcribe_timestamps(audio, config, 0, max_audio_duration) - - # Get speech timestamps from full audio file - merged = self.get_merged_timestamps(timestamp_segments, config, max_audio_duration) - - # A deque of transcribed segments that is passed to the next segment as a prompt - prompt_window = deque() - - print("Processing timestamps:") - pprint(merged) - - result = { - 'text': "", - 'segments': [], - 'language': "" - } - languageCounter = Counter() - detected_language = None - - segment_index = config.initial_segment_index - - # For each time segment, run whisper - for segment in merged: - segment_index += 1 - segment_start = segment['start'] - segment_end = segment['end'] - segment_expand_amount = segment.get('expand_amount', 0) - segment_gap = segment.get('gap', False) - - segment_duration = segment_end - segment_start - - if segment_duration < MIN_SEGMENT_DURATION: - continue; - - # Audio to run on Whisper - segment_audio = self.get_audio_segment(audio, start_time = str(segment_start), duration = str(segment_duration)) - # Previous segments to use as a prompt - segment_prompt = ' '.join([segment['text'] for segment in prompt_window]) if len(prompt_window) > 0 else None - - # Detected language - detected_language = languageCounter.most_common(1)[0][0] if len(languageCounter) > 0 else None - - print("Running whisper from ", format_timestamp(segment_start), " to ", format_timestamp(segment_end), ", duration: ", - segment_duration, "expanded: ", segment_expand_amount, "prompt: ", segment_prompt, "language: ", detected_language) - segment_result = whisperCallable.invoke(segment_audio, segment_index, segment_prompt, detected_language) - - adjusted_segments = self.adjust_timestamp(segment_result["segments"], adjust_seconds=segment_start, max_source_time=segment_duration) - - # Propagate expand amount to the segments - if (segment_expand_amount > 0): - segment_without_expansion = segment_duration - segment_expand_amount - - for adjusted_segment in adjusted_segments: - adjusted_segment_end = adjusted_segment['end'] - - # Add expand amount if the segment got expanded - if 
(adjusted_segment_end > segment_without_expansion): - adjusted_segment["expand_amount"] = adjusted_segment_end - segment_without_expansion - - # Append to output - result['text'] += segment_result['text'] - result['segments'].extend(adjusted_segments) - - # Increment detected language - if not segment_gap: - languageCounter[segment_result['language']] += 1 - - # Update prompt window - self.__update_prompt_window(prompt_window, adjusted_segments, segment_end, segment_gap, config) - - if detected_language is not None: - result['language'] = detected_language - - return result - - def __update_prompt_window(self, prompt_window: Deque, adjusted_segments: List, segment_end: float, segment_gap: bool, config: TranscriptionConfig): - if (config.max_prompt_window is not None and config.max_prompt_window > 0): - # Add segments to the current prompt window (unless it is a speech gap) - if not segment_gap: - for segment in adjusted_segments: - if segment.get('no_speech_prob', 0) <= PROMPT_NO_SPEECH_PROB: - prompt_window.append(segment) - - while (len(prompt_window) > 0): - first_end_time = prompt_window[0].get('end', 0) - # Time expanded in the segments should be discounted from the prompt window - first_expand_time = prompt_window[0].get('expand_amount', 0) - - if (first_end_time - first_expand_time < segment_end - config.max_prompt_window): - prompt_window.popleft() - else: - break - - def include_gaps(self, segments: Iterator[dict], min_gap_length: float, total_duration: float): - result = [] - last_end_time = 0 - - for segment in segments: - segment_start = float(segment['start']) - segment_end = float(segment['end']) - - if (last_end_time != segment_start): - delta = segment_start - last_end_time - - if (min_gap_length is None or delta >= min_gap_length): - result.append( { 'start': last_end_time, 'end': segment_start, 'gap': True } ) - - last_end_time = segment_end - result.append(segment) - - # Also include total duration if specified - if (total_duration is not None and last_end_time < total_duration): - delta = total_duration - segment_start - - if (min_gap_length is None or delta >= min_gap_length): - result.append( { 'start': last_end_time, 'end': total_duration, 'gap': True } ) - - return result - - # Expand the end time of each segment to the start of the next segment - def expand_gaps(self, segments: List[Dict[str, Any]], total_duration: float): - result = [] - - if len(segments) == 0: - return result - - # Add gap at the beginning if needed - if (segments[0]['start'] > 0): - result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } ) - - for i in range(len(segments) - 1): - current_segment = segments[i] - next_segment = segments[i + 1] - - delta = next_segment['start'] - current_segment['end'] - - # Expand if the gap actually exists - if (delta >= 0): - current_segment = current_segment.copy() - current_segment['expand_amount'] = delta - current_segment['end'] = next_segment['start'] - - result.append(current_segment) - - # Add last segment - last_segment = segments[-1] - result.append(last_segment) - - # Also include total duration if specified - if (total_duration is not None): - last_segment = result[-1] - - if (last_segment['end'] < total_duration): - last_segment = last_segment.copy() - last_segment['end'] = total_duration - result[-1] = last_segment - - return result - - def fill_gaps(self, segments: List[Dict[str, Any]], total_duration: float, max_expand_size: float = None): - result = [] - - if len(segments) == 0: - return result - - # Add gap at the beginning if 
needed - if (segments[0]['start'] > 0): - result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } ) - - for i in range(len(segments) - 1): - expanded = False - current_segment = segments[i] - next_segment = segments[i + 1] - - delta = next_segment['start'] - current_segment['end'] - - if (max_expand_size is not None and delta <= max_expand_size): - # Just expand the current segment - current_segment = current_segment.copy() - current_segment['expand_amount'] = delta - current_segment['end'] = next_segment['start'] - expanded = True - - result.append(current_segment) - - # Add a gap to the next segment if needed - if (delta >= 0 and not expanded): - result.append({ 'start': current_segment['end'], 'end': next_segment['start'], 'gap': True } ) - - # Add last segment - last_segment = segments[-1] - result.append(last_segment) - - # Also include total duration if specified - if (total_duration is not None): - last_segment = result[-1] - - delta = total_duration - last_segment['end'] - - if (delta > 0): - if (max_expand_size is not None and delta <= max_expand_size): - # Expand the last segment - last_segment = last_segment.copy() - last_segment['expand_amount'] = delta - last_segment['end'] = total_duration - result[-1] = last_segment - else: - result.append({ 'start': last_segment['end'], 'end': total_duration, 'gap': True } ) - - return result - - def adjust_timestamp(self, segments: Iterator[dict], adjust_seconds: float, max_source_time: float = None): - result = [] - - for segment in segments: - segment_start = float(segment['start']) - segment_end = float(segment['end']) - - # Filter segments? - if (max_source_time is not None): - if (segment_start > max_source_time): - continue - segment_end = min(max_source_time, segment_end) - - new_segment = segment.copy() - - # Add to start and end - new_segment['start'] = segment_start + adjust_seconds - new_segment['end'] = segment_end + adjust_seconds - result.append(new_segment) - return result - - def multiply_timestamps(self, timestamps: List[Dict[str, Any]], factor: float): - result = [] - - for entry in timestamps: - start = entry['start'] - end = entry['end'] - - result.append({ - 'start': start * factor, - 'end': end * factor - }) - return result - - -class VadSileroTranscription(AbstractTranscription): - def __init__(self, sampling_rate: int = 16000, cache: ModelCache = None): - super().__init__(sampling_rate=sampling_rate) - self.model = None - self.cache = cache - self._initialize_model() - - def _initialize_model(self): - if (self.cache is not None): - model_key = "VadSileroTranscription" - self.model, self.get_speech_timestamps = self.cache.get(model_key, self._create_model) - print("Loaded Silerio model from cache.") - else: - self.model, self.get_speech_timestamps = self._create_model() - print("Created Silerio model") - - def _create_model(self): - model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad') - - # Silero does not benefit from multi-threading - torch.set_num_threads(1) # JIT - (get_speech_timestamps, _, _, _, _) = utils - - return model, get_speech_timestamps - - def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float): - result = [] - - print("Getting timestamps from audio file: {}, start: {}, duration: {}".format(audio, start_time, end_time)) - perf_start_time = time.perf_counter() - - # Divide procesisng of audio into chunks - chunk_start = start_time - - while (chunk_start < end_time): - chunk_duration = min(end_time - 
chunk_start, VAD_MAX_PROCESSING_CHUNK) - - print("Processing VAD in chunk from {} to {}".format(format_timestamp(chunk_start), format_timestamp(chunk_start + chunk_duration))) - wav = self.get_audio_segment(audio, str(chunk_start), str(chunk_duration)) - - sample_timestamps = self.get_speech_timestamps(wav, self.model, sampling_rate=self.sampling_rate, threshold=SPEECH_TRESHOLD) - seconds_timestamps = self.multiply_timestamps(sample_timestamps, factor=1 / self.sampling_rate) - adjusted = self.adjust_timestamp(seconds_timestamps, adjust_seconds=chunk_start, max_source_time=chunk_start + chunk_duration) - - #pprint(adjusted) - - result.extend(adjusted) - chunk_start += chunk_duration - - perf_end_time = time.perf_counter() - print("VAD processing took {} seconds".format(perf_end_time - perf_start_time)) - - return result - - def __getstate__(self): - # We only need the sampling rate - return { 'sampling_rate': self.sampling_rate } - - def __setstate__(self, state): - self.sampling_rate = state['sampling_rate'] - self.model = None - # Use the global cache - self.cache = GLOBAL_MODEL_CACHE - self._initialize_model() - -# A very simple VAD that just marks every N seconds as speech -class VadPeriodicTranscription(AbstractTranscription): - def __init__(self, sampling_rate: int = 16000): - super().__init__(sampling_rate=sampling_rate) - - def is_transcribe_timestamps_fast(self): - # This is a very fast VAD - no need to parallelize it - return True - - def get_transcribe_timestamps(self, audio: str, config: PeriodicTranscriptionConfig, start_time: float, end_time: float): - result = [] - - # Generate a timestamp every N seconds - start_timestamp = start_time - - while (start_timestamp < end_time): - end_timestamp = min(start_timestamp + config.periodic_duration, end_time) - segment_duration = end_timestamp - start_timestamp - - # Minimum duration is 1 second - if (segment_duration >= 1): - result.append( { 'start': start_timestamp, 'end': end_timestamp } ) - - start_timestamp = end_timestamp - - return result - -def get_audio_duration(file: str): - return float(ffmpeg.probe(file)["format"]["duration"]) - -def load_audio(file: str, sample_rate: int = 16000, - start_time: str = None, duration: str = None): - """ - Open an audio file and read as mono waveform, resampling as necessary - - Parameters - ---------- - file: str - The audio file to open - - sr: int - The sample rate to resample the audio if necessary - - start_time: str - The start time, using the standard FFMPEG time duration syntax, or None to disable. - - duration: str - The duration, using the standard FFMPEG time duration syntax, or None to disable. - - Returns - ------- - A NumPy array containing the audio waveform, in float32 dtype. - """ - try: - inputArgs = {'threads': 0} - - if (start_time is not None): - inputArgs['ss'] = start_time - if (duration is not None): - inputArgs['t'] = duration - - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
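- # The ffmpeg subprocess below writes raw signed 16-bit little-endian PCM (s16le, mono, resampled) to stdout; - # the buffer is then converted to float32 in [-1.0, 1.0] by the final division by 32768.0.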
- out, _ = ( - ffmpeg.input(file, **inputArgs) - .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sample_rate) - .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True) - ) - except ffmpeg.Error as e: - raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") - - return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 \ No newline at end of file diff --git a/spaces/lint/anime_controlnet/src/ui_shared.py b/spaces/lint/anime_controlnet/src/ui_shared.py deleted file mode 100644 index 0f50a92be53c45e7753a994e3f1ac23292b4d20f..0000000000000000000000000000000000000000 --- a/spaces/lint/anime_controlnet/src/ui_shared.py +++ /dev/null @@ -1,24 +0,0 @@ -import diffusers.schedulers -import os -from pathlib import Path - -assets_directory = Path(__file__).parent / "ui_assets" - -is_hfspace = "SPACE_REPO_NAME" in os.environ - -scheduler_dict = { - k: v - for k, v in diffusers.schedulers.__dict__.items() - if "Scheduler" in k and "Flax" not in k -} -scheduler_dict.pop( - "VQDiffusionScheduler", None -) # requires unique parameter, unlike other schedulers -scheduler_names = list(scheduler_dict.keys()) -default_scheduler = "UniPCMultistepScheduler" - -with open(assets_directory / "model_ids.txt", "r") as fp: - model_ids = fp.read().splitlines() - -with open(assets_directory / "controlnet_ids.txt", "r") as fp: - controlnet_ids = fp.read().splitlines() diff --git a/spaces/lj1995/vocal2guitar/uvr5_pack/lib_v5/layers_123812KB .py b/spaces/lj1995/vocal2guitar/uvr5_pack/lib_v5/layers_123812KB .py deleted file mode 100644 index 9835dc0f0dd66a7ef3517101180ec2c54eb6011d..0000000000000000000000000000000000000000 --- a/spaces/lj1995/vocal2guitar/uvr5_pack/lib_v5/layers_123812KB .py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from uvr5_pack.lib_v5 import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - 
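- # Crop the encoder skip connection to the spatial size of the upsampled feature map - # before concatenating the two tensors along the channel dimension.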
skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/lordvader31/text-matching/app.py b/spaces/lordvader31/text-matching/app.py deleted file mode 100644 index 800de8fa3ebed2353cc44bb62ce64aca7d927d3c..0000000000000000000000000000000000000000 --- a/spaces/lordvader31/text-matching/app.py +++ /dev/null @@ -1,74 +0,0 @@ -import streamlit as st -from topics import TopicModelling -import mdforest -import utils -import os - - - -col1, mid, col2 = st.columns([30,5,20]) -with col1: - st.title("Welcome to Embeddr") - st.markdown("This is a demo of _one of the many_ use cases for an embedding of all your notes. This application lets you find **common ideas** between any two notes.") - st.markdown("You can upload two markdown files and the application will find the common ideas between them. It will generate insights based on the common ideas.") - st.markdown("**I will be building a better embedding model soon.** Stay tuned for updates. 
This is just a demo of what is possible with a good embedding model.") -with col2: - st.markdown("### [Sign up for updates](https://embeddr.my.canva.site/)") - st.image("media/qrcode.png") - -st.markdown("---") - -st.markdown("## Drop in two documents and get insights between them.") - -col3, mid2, col4 = st.columns([40,5,40]) -with col3: - st.markdown("### Drop the first document") - file1 = st.file_uploader("Upload a file", type=["md", "txt"], key="first") -with col4: - st.markdown("### Drop the second document") - file2 = st.file_uploader("Upload a file", type=["md", "txt"], key="second") - -topics = {} -results = {} - -embedder = utils.load_model() -nlp = utils.load_nlp() - -if not os.path.exists("./prompter/"): - os.mkdir("./prompter/") - -if file1 is not None and file2 is not None: - - input_text1 = file1.read().decode("utf-8") - input_text2 = file2.read().decode("utf-8") - - cleaned_text1 = mdforest.clean_markdown(input_text1) - cleaned_text2 = mdforest.clean_markdown(input_text2) - - st.title("Generating insights") - - with st.spinner('Generating insights...'): - - insight1 = TopicModelling(cleaned_text1) - insight2 = TopicModelling(cleaned_text2) - - keywords1, concepts1 = insight1.generate_topics() - topics['insight1'] = [keywords1, concepts1] - keywords2, concepts2 = insight2.generate_topics() - topics['insight2'] = [keywords2, concepts2] - - with st.spinner("Flux capacitor is fluxing..."): - clutered = utils.cluster_based_on_topics(nlp, embedder, cleaned_text1, cleaned_text2, num_clusters=3) - - with st.spinner("Polishing up"): - results = utils.generate_insights(topics, file1.name, file2.name, cleaned_text1, cleaned_text2, clutered) - st.success("Done!") - - st.title("Insights generated") - st.markdown("### The following insights are common to both documents.") - for result in results: - with st.expander(result["name"]): - st.write(result["description"]) - st.markdown("Related Concepts:") - for insight in result["concepts"]: - st.markdown(f" - {insight}") \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/pybind11/include/pybind11/chrono.h b/spaces/ma-xu/LIVE/pybind11/include/pybind11/chrono.h deleted file mode 100644 index 6127c659bdcef2da89d9fb80568f1c570bbb6534..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/include/pybind11/chrono.h +++ /dev/null @@ -1,191 +0,0 @@ -/* - pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime - - Copyright (c) 2016 Trent Houliston and - Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "pybind11.h" -#include -#include -#include -#include - -// Backport the PyDateTime_DELTA functions from Python3.3 if required -#ifndef PyDateTime_DELTA_GET_DAYS -#define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days) -#endif -#ifndef PyDateTime_DELTA_GET_SECONDS -#define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds) -#endif -#ifndef PyDateTime_DELTA_GET_MICROSECONDS -#define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds) -#endif - -PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) -PYBIND11_NAMESPACE_BEGIN(detail) - -template class duration_caster { -public: - typedef typename type::rep rep; - typedef typename type::period period; - - typedef std::chrono::duration> days; - - bool load(handle src, bool) { - using namespace std::chrono; - - // Lazy initialise the PyDateTime import - if (!PyDateTimeAPI) { PyDateTime_IMPORT; } - - if (!src) return false; - // If invoked with datetime.delta object - if (PyDelta_Check(src.ptr())) { - value = type(duration_cast>( - days(PyDateTime_DELTA_GET_DAYS(src.ptr())) - + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr())) - + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr())))); - return true; - } - // If invoked with a float we assume it is seconds and convert - else if (PyFloat_Check(src.ptr())) { - value = type(duration_cast>(duration(PyFloat_AsDouble(src.ptr())))); - return true; - } - else return false; - } - - // If this is a duration just return it back - static const std::chrono::duration& get_duration(const std::chrono::duration &src) { - return src; - } - - // If this is a time_point get the time_since_epoch - template static std::chrono::duration get_duration(const std::chrono::time_point> &src) { - return src.time_since_epoch(); - } - - static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) { - using namespace std::chrono; - - // Use overloaded function to get our duration from our source - // Works out if it is a duration or time_point and get the duration - auto d = get_duration(src); - - // Lazy initialise the PyDateTime import - if (!PyDateTimeAPI) { PyDateTime_IMPORT; } - - // Declare these special duration types so the conversions happen with the correct primitive types (int) - using dd_t = duration>; - using ss_t = duration>; - using us_t = duration; - - auto dd = duration_cast(d); - auto subd = d - dd; - auto ss = duration_cast(subd); - auto us = duration_cast(subd - ss); - return PyDelta_FromDSU(dd.count(), ss.count(), us.count()); - } - - PYBIND11_TYPE_CASTER(type, _("datetime.timedelta")); -}; - -// This is for casting times on the system clock into datetime.datetime instances -template class type_caster> { -public: - typedef std::chrono::time_point type; - bool load(handle src, bool) { - using namespace std::chrono; - - // Lazy initialise the PyDateTime import - if (!PyDateTimeAPI) { PyDateTime_IMPORT; } - - if (!src) return false; - - std::tm cal; - microseconds msecs; - - if (PyDateTime_Check(src.ptr())) { - cal.tm_sec = PyDateTime_DATE_GET_SECOND(src.ptr()); - cal.tm_min = PyDateTime_DATE_GET_MINUTE(src.ptr()); - cal.tm_hour = PyDateTime_DATE_GET_HOUR(src.ptr()); - cal.tm_mday = PyDateTime_GET_DAY(src.ptr()); - cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1; - cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900; - cal.tm_isdst = -1; - msecs = microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr())); - } else if (PyDate_Check(src.ptr())) { - cal.tm_sec = 0; - cal.tm_min = 0; - cal.tm_hour = 0; - 
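- // A datetime.date carries no time-of-day, so the clock fields above are zeroed - // and only the calendar fields are read from the Python object below.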
cal.tm_mday = PyDateTime_GET_DAY(src.ptr()); - cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1; - cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900; - cal.tm_isdst = -1; - msecs = microseconds(0); - } else if (PyTime_Check(src.ptr())) { - cal.tm_sec = PyDateTime_TIME_GET_SECOND(src.ptr()); - cal.tm_min = PyDateTime_TIME_GET_MINUTE(src.ptr()); - cal.tm_hour = PyDateTime_TIME_GET_HOUR(src.ptr()); - cal.tm_mday = 1; // This date (day, month, year) = (1, 0, 70) - cal.tm_mon = 0; // represents 1-Jan-1970, which is the first - cal.tm_year = 70; // earliest available date for Python's datetime - cal.tm_isdst = -1; - msecs = microseconds(PyDateTime_TIME_GET_MICROSECOND(src.ptr())); - } - else return false; - - value = system_clock::from_time_t(std::mktime(&cal)) + msecs; - return true; - } - - static handle cast(const std::chrono::time_point &src, return_value_policy /* policy */, handle /* parent */) { - using namespace std::chrono; - - // Lazy initialise the PyDateTime import - if (!PyDateTimeAPI) { PyDateTime_IMPORT; } - - // Get out microseconds, and make sure they are positive, to avoid bug in eastern hemisphere time zones - // (cfr. https://github.com/pybind/pybind11/issues/2417) - using us_t = duration; - auto us = duration_cast(src.time_since_epoch() % seconds(1)); - if (us.count() < 0) - us += seconds(1); - - // Subtract microseconds BEFORE `system_clock::to_time_t`, because: - // > If std::time_t has lower precision, it is implementation-defined whether the value is rounded or truncated. - // (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t) - std::time_t tt = system_clock::to_time_t(time_point_cast(src - us)); - // this function uses static memory so it's best to copy it out asap just in case - // otherwise other code that is using localtime may break this (not just python code) - std::tm localtime = *std::localtime(&tt); - - return PyDateTime_FromDateAndTime(localtime.tm_year + 1900, - localtime.tm_mon + 1, - localtime.tm_mday, - localtime.tm_hour, - localtime.tm_min, - localtime.tm_sec, - us.count()); - } - PYBIND11_TYPE_CASTER(type, _("datetime.datetime")); -}; - -// Other clocks that are not the system clock are not measured as datetime.datetime objects -// since they are not measured on calendar time. 
So instead we just make them timedeltas -// Or if they have passed us a time as a float we convert that -template class type_caster> -: public duration_caster> { -}; - -template class type_caster> -: public duration_caster> { -}; - -PYBIND11_NAMESPACE_END(detail) -PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_pytypes.py b/spaces/ma-xu/LIVE/pybind11/tests/test_pytypes.py deleted file mode 100644 index 95cc94af8c89517bf4a993af43041414c46d4dd5..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_pytypes.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import division -import pytest -import sys - -import env # noqa: F401 - -from pybind11_tests import pytypes as m -from pybind11_tests import debug_enabled - - -def test_int(doc): - assert doc(m.get_int) == "get_int() -> int" - - -def test_iterator(doc): - assert doc(m.get_iterator) == "get_iterator() -> Iterator" - - -def test_iterable(doc): - assert doc(m.get_iterable) == "get_iterable() -> Iterable" - - -def test_list(capture, doc): - with capture: - lst = m.get_list() - assert lst == ["inserted-0", "overwritten", "inserted-2"] - - lst.append("value2") - m.print_list(lst) - assert capture.unordered == """ - Entry at position 0: value - list item 0: inserted-0 - list item 1: overwritten - list item 2: inserted-2 - list item 3: value2 - """ - - assert doc(m.get_list) == "get_list() -> list" - assert doc(m.print_list) == "print_list(arg0: list) -> None" - - -def test_none(capture, doc): - assert doc(m.get_none) == "get_none() -> None" - assert doc(m.print_none) == "print_none(arg0: None) -> None" - - -def test_set(capture, doc): - s = m.get_set() - assert s == {"key1", "key2", "key3"} - - with capture: - s.add("key4") - m.print_set(s) - assert capture.unordered == """ - key: key1 - key: key2 - key: key3 - key: key4 - """ - - assert not m.set_contains(set([]), 42) - assert m.set_contains({42}, 42) - assert m.set_contains({"foo"}, "foo") - - assert doc(m.get_list) == "get_list() -> list" - assert doc(m.print_list) == "print_list(arg0: list) -> None" - - -def test_dict(capture, doc): - d = m.get_dict() - assert d == {"key": "value"} - - with capture: - d["key2"] = "value2" - m.print_dict(d) - assert capture.unordered == """ - key: key, value=value - key: key2, value=value2 - """ - - assert not m.dict_contains({}, 42) - assert m.dict_contains({42: None}, 42) - assert m.dict_contains({"foo": None}, "foo") - - assert doc(m.get_dict) == "get_dict() -> dict" - assert doc(m.print_dict) == "print_dict(arg0: dict) -> None" - - assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3} - - -def test_str(doc): - assert m.str_from_string().encode().decode() == "baz" - assert m.str_from_bytes().encode().decode() == "boo" - - assert doc(m.str_from_bytes) == "str_from_bytes() -> str" - - class A(object): - def __str__(self): - return "this is a str" - - def __repr__(self): - return "this is a repr" - - assert m.str_from_object(A()) == "this is a str" - assert m.repr_from_object(A()) == "this is a repr" - - s1, s2 = m.str_format() - assert s1 == "1 + 2 = 3" - assert s1 == s2 - - -def test_bytes(doc): - assert m.bytes_from_string().decode() == "foo" - assert m.bytes_from_str().decode() == "bar" - - assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format( - "str" if env.PY2 else "bytes" - ) - - -def test_capsule(capture): - pytest.gc_collect() - with capture: - a = m.return_capsule_with_destructor() - del a - pytest.gc_collect() - assert 
capture.unordered == """ - creating capsule - destructing capsule - """ - - with capture: - a = m.return_capsule_with_destructor_2() - del a - pytest.gc_collect() - assert capture.unordered == """ - creating capsule - destructing capsule: 1234 - """ - - with capture: - a = m.return_capsule_with_name_and_destructor() - del a - pytest.gc_collect() - assert capture.unordered == """ - created capsule (1234, 'pointer type description') - destructing capsule (1234, 'pointer type description') - """ - - -def test_accessors(): - class SubTestObject: - attr_obj = 1 - attr_char = 2 - - class TestObject: - basic_attr = 1 - begin_end = [1, 2, 3] - d = {"operator[object]": 1, "operator[char *]": 2} - sub = SubTestObject() - - def func(self, x, *args): - return self.basic_attr + x + sum(args) - - d = m.accessor_api(TestObject()) - assert d["basic_attr"] == 1 - assert d["begin_end"] == [1, 2, 3] - assert d["operator[object]"] == 1 - assert d["operator[char *]"] == 2 - assert d["attr(object)"] == 1 - assert d["attr(char *)"] == 2 - assert d["missing_attr_ptr"] == "raised" - assert d["missing_attr_chain"] == "raised" - assert d["is_none"] is False - assert d["operator()"] == 2 - assert d["operator*"] == 7 - assert d["implicit_list"] == [1, 2, 3] - assert all(x in TestObject.__dict__ for x in d["implicit_dict"]) - - assert m.tuple_accessor(tuple()) == (0, 1, 2) - - d = m.accessor_assignment() - assert d["get"] == 0 - assert d["deferred_get"] == 0 - assert d["set"] == 1 - assert d["deferred_set"] == 1 - assert d["var"] == 99 - - -def test_constructors(): - """C++ default and converting constructors are equivalent to type calls in Python""" - types = [bytes, str, bool, int, float, tuple, list, dict, set] - expected = {t.__name__: t() for t in types} - if env.PY2: - # Note that bytes.__name__ == 'str' in Python 2. - # pybind11::str is unicode even under Python 2. - expected["bytes"] = bytes() - expected["str"] = unicode() # noqa: F821 - assert m.default_constructors() == expected - - data = { - bytes: b'41', # Currently no supported or working conversions. - str: 42, - bool: "Not empty", - int: "42", - float: "+1e3", - tuple: range(3), - list: range(3), - dict: [("two", 2), ("one", 1), ("three", 3)], - set: [4, 4, 5, 6, 6, 6], - memoryview: b'abc' - } - inputs = {k.__name__: v for k, v in data.items()} - expected = {k.__name__: k(v) for k, v in data.items()} - if env.PY2: # Similar to the above. See comments above. 
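- # Under Python 2, bytes is an alias of str and pybind11::str is unicode, - # so the inputs and expected values are adjusted before the comparisons below.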
- inputs["bytes"] = b'41' - inputs["str"] = 42 - expected["bytes"] = b'41' - expected["str"] = u"42" - - assert m.converting_constructors(inputs) == expected - assert m.cast_functions(inputs) == expected - - # Converting constructors and cast functions should just reference rather - # than copy when no conversion is needed: - noconv1 = m.converting_constructors(expected) - for k in noconv1: - assert noconv1[k] is expected[k] - - noconv2 = m.cast_functions(expected) - for k in noconv2: - assert noconv2[k] is expected[k] - - -def test_pybind11_str_raw_str(): - # specifically to exercise pybind11::str::raw_str - cvt = m.convert_to_pybind11_str - assert cvt(u"Str") == u"Str" - assert cvt(b'Bytes') == u"Bytes" if env.PY2 else "b'Bytes'" - assert cvt(None) == u"None" - assert cvt(False) == u"False" - assert cvt(True) == u"True" - assert cvt(42) == u"42" - assert cvt(2**65) == u"36893488147419103232" - assert cvt(-1.50) == u"-1.5" - assert cvt(()) == u"()" - assert cvt((18,)) == u"(18,)" - assert cvt([]) == u"[]" - assert cvt([28]) == u"[28]" - assert cvt({}) == u"{}" - assert cvt({3: 4}) == u"{3: 4}" - assert cvt(set()) == u"set([])" if env.PY2 else "set()" - assert cvt({3, 3}) == u"set([3])" if env.PY2 else "{3}" - - valid_orig = u"DZ" - valid_utf8 = valid_orig.encode("utf-8") - valid_cvt = cvt(valid_utf8) - assert type(valid_cvt) == bytes # Probably surprising. - assert valid_cvt == b'\xc7\xb1' - - malformed_utf8 = b'\x80' - malformed_cvt = cvt(malformed_utf8) - assert type(malformed_cvt) == bytes # Probably surprising. - assert malformed_cvt == b'\x80' - - -def test_implicit_casting(): - """Tests implicit casting when assigning or appending to dicts and lists.""" - z = m.get_implicit_casting() - assert z['d'] == { - 'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc', - 'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3', - 'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44 - } - assert z['l'] == [3, 6, 9, 12, 15] - - -def test_print(capture): - with capture: - m.print_function() - assert capture == """ - Hello, World! 
- 1 2.0 three True -- multiple args - *args-and-a-custom-separator - no new line here -- next print - flush - py::print + str.format = this - """ - assert capture.stderr == "this goes to stderr" - - with pytest.raises(RuntimeError) as excinfo: - m.print_failure() - assert str(excinfo.value) == "make_tuple(): unable to convert " + ( - "argument of type 'UnregisteredType' to Python object" - if debug_enabled else - "arguments to Python object (compile in debug mode for details)" - ) - - -def test_hash(): - class Hashable(object): - def __init__(self, value): - self.value = value - - def __hash__(self): - return self.value - - class Unhashable(object): - __hash__ = None - - assert m.hash_function(Hashable(42)) == 42 - with pytest.raises(TypeError): - m.hash_function(Unhashable()) - - -def test_number_protocol(): - for a, b in [(1, 1), (3, 5)]: - li = [a == b, a != b, a < b, a <= b, a > b, a >= b, a + b, - a - b, a * b, a / b, a | b, a & b, a ^ b, a >> b, a << b] - assert m.test_number_protocol(a, b) == li - - -def test_list_slicing(): - li = list(range(100)) - assert li[::2] == m.test_list_slicing(li) - - -@pytest.mark.parametrize('method, args, fmt, expected_view', [ - (m.test_memoryview_object, (b'red',), 'B', b'red'), - (m.test_memoryview_buffer_info, (b'green',), 'B', b'green'), - (m.test_memoryview_from_buffer, (False,), 'h', [3, 1, 4, 1, 5]), - (m.test_memoryview_from_buffer, (True,), 'H', [2, 7, 1, 8]), - (m.test_memoryview_from_buffer_nativeformat, (), '@i', [4, 7, 5]), -]) -def test_memoryview(method, args, fmt, expected_view): - view = method(*args) - assert isinstance(view, memoryview) - assert view.format == fmt - if isinstance(expected_view, bytes) or not env.PY2: - view_as_list = list(view) - else: - # Using max to pick non-zero byte (big-endian vs little-endian). - view_as_list = [max([ord(c) for c in s]) for s in view] - assert view_as_list == list(expected_view) - - -@pytest.mark.xfail("env.PYPY", reason="getrefcount is not available") -@pytest.mark.parametrize('method', [ - m.test_memoryview_object, - m.test_memoryview_buffer_info, -]) -def test_memoryview_refcount(method): - buf = b'\x0a\x0b\x0c\x0d' - ref_before = sys.getrefcount(buf) - view = method(buf) - ref_after = sys.getrefcount(buf) - assert ref_before < ref_after - assert list(view) == list(buf) - - -def test_memoryview_from_buffer_empty_shape(): - view = m.test_memoryview_from_buffer_empty_shape() - assert isinstance(view, memoryview) - assert view.format == 'B' - if env.PY2: - # Python 2 behavior is weird, but Python 3 (the future) is fine. 
- # PyPy3 has -#include -#include -#include -#include - -namespace thrust -{ -namespace detail -{ - - -// XXX WAR an unfortunate circular #inclusion problem -template class temporary_array; - - -} // end detail - -namespace system -{ -namespace detail -{ -namespace sequential -{ - - -__thrust_exec_check_disable__ -template -__host__ __device__ -void iter_swap(ForwardIterator1 iter1, ForwardIterator2 iter2) -{ - // XXX this isn't correct because it doesn't use thrust::swap - using namespace thrust::detail; - - typedef typename thrust::iterator_value::type T; - - T temp = *iter1; - *iter1 = *iter2; - *iter2 = temp; -} - - -__thrust_exec_check_disable__ -template -__host__ __device__ - ForwardIterator partition(sequential::execution_policy &, - ForwardIterator first, - ForwardIterator last, - Predicate pred) -{ - if(first == last) - return first; - - // wrap pred - thrust::detail::wrapped_function< - Predicate, - bool - > wrapped_pred(pred); - - while(wrapped_pred(*first)) - { - if(++first == last) - return first; - } - - ForwardIterator next = first; - - while(++next != last) - { - if(wrapped_pred(*next)) - { - iter_swap(first, next); - ++first; - } - } - - return first; -} - - -__thrust_exec_check_disable__ -template -__host__ __device__ - ForwardIterator partition(sequential::execution_policy &, - ForwardIterator first, - ForwardIterator last, - InputIterator stencil_first, - Predicate pred) -{ - if(first == last) - return first; - - // wrap pred - thrust::detail::wrapped_function< - Predicate, - bool - > wrapped_pred(pred); - - while(wrapped_pred(*stencil_first)) - { - ++stencil_first; - if(++first == last) - { - return first; - } - } - - ForwardIterator next = first; - - // advance stencil to next element as well - ++stencil_first; - - while(++next != last) - { - if(wrapped_pred(*stencil_first)) - { - iter_swap(first, next); - ++first; - } - - ++stencil_first; - } - - return first; -} - - -__thrust_exec_check_disable__ -template -__host__ __device__ - ForwardIterator stable_partition(sequential::execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - Predicate pred) -{ - // wrap pred - thrust::detail::wrapped_function< - Predicate, - bool - > wrapped_pred(pred); - - typedef typename thrust::iterator_value::type T; - - typedef thrust::detail::temporary_array TempRange; - typedef typename TempRange::iterator TempIterator; - - TempRange temp(exec, first, last); - - for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter) - { - if(wrapped_pred(*iter)) - { - *first = *iter; - ++first; - } - } - - ForwardIterator middle = first; - - for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter) - { - if(!wrapped_pred(*iter)) - { - *first = *iter; - ++first; - } - } - - return middle; -} - - -__thrust_exec_check_disable__ -template -__host__ __device__ - ForwardIterator stable_partition(sequential::execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - InputIterator stencil, - Predicate pred) -{ - // wrap pred - thrust::detail::wrapped_function< - Predicate, - bool - > wrapped_pred(pred); - - typedef typename thrust::iterator_value::type T; - - typedef thrust::detail::temporary_array TempRange; - typedef typename TempRange::iterator TempIterator; - - TempRange temp(exec, first, last); - - InputIterator stencil_iter = stencil; - for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter, ++stencil_iter) - { - if(wrapped_pred(*stencil_iter)) - { - *first = *iter; - ++first; - } - } - - ForwardIterator middle = first; - stencil_iter = 
stencil; - - for(TempIterator iter = temp.begin(); iter != temp.end(); ++iter, ++stencil_iter) - { - if(!wrapped_pred(*stencil_iter)) - { - *first = *iter; - ++first; - } - } - - return middle; -} - - -__thrust_exec_check_disable__ -template -__host__ __device__ - thrust::pair - stable_partition_copy(sequential::execution_policy &, - InputIterator first, - InputIterator last, - OutputIterator1 out_true, - OutputIterator2 out_false, - Predicate pred) -{ - // wrap pred - thrust::detail::wrapped_function< - Predicate, - bool - > wrapped_pred(pred); - - for(; first != last; ++first) - { - if(wrapped_pred(*first)) - { - *out_true = *first; - ++out_true; - } // end if - else - { - *out_false = *first; - ++out_false; - } // end else - } - - return thrust::make_pair(out_true, out_false); -} - - -__thrust_exec_check_disable__ -template -__host__ __device__ - thrust::pair - stable_partition_copy(sequential::execution_policy &, - InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - OutputIterator1 out_true, - OutputIterator2 out_false, - Predicate pred) -{ - // wrap pred - thrust::detail::wrapped_function< - Predicate, - bool - > wrapped_pred(pred); - - for(; first != last; ++first, ++stencil) - { - if(wrapped_pred(*stencil)) - { - *out_true = *first; - ++out_true; - } // end if - else - { - *out_false = *first; - ++out_false; - } // end else - } - - return thrust::make_pair(out_true, out_false); -} - - -} // end namespace sequential -} // end namespace detail -} // end namespace system -} // end namespace thrust - diff --git a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/util/visualizer.py b/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/util/visualizer.py deleted file mode 100644 index 2cc519b52e9e15f5891ac3f4dcab620793794322..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/util/visualizer.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import os -import ntpath -import time -from . 
import util -import scipy.misc - -try: - from StringIO import StringIO # Python 2.7 -except ImportError: - from io import BytesIO # Python 3.x -import torchvision.utils as vutils -from tensorboardX import SummaryWriter -import torch -import numpy as np - - -class Visualizer: - def __init__(self, opt): - self.opt = opt - self.tf_log = opt.isTrain and opt.tf_log - - self.tensorboard_log = opt.tensorboard_log - - self.win_size = opt.display_winsize - self.name = opt.name - if self.tensorboard_log: - - if self.opt.isTrain: - self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, "logs") - if not os.path.exists(self.log_dir): - os.makedirs(self.log_dir) - self.writer = SummaryWriter(log_dir=self.log_dir) - else: - print("hi :)") - self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, opt.results_dir) - if not os.path.exists(self.log_dir): - os.makedirs(self.log_dir) - - if opt.isTrain: - self.log_name = os.path.join(opt.checkpoints_dir, opt.name, "loss_log.txt") - with open(self.log_name, "a") as log_file: - now = time.strftime("%c") - log_file.write("================ Training Loss (%s) ================\n" % now) - - # |visuals|: dictionary of images to display or save - def display_current_results(self, visuals, epoch, step): - - all_tensor = [] - if self.tensorboard_log: - - for key, tensor in visuals.items(): - all_tensor.append((tensor.data.cpu() + 1) / 2) - - output = torch.cat(all_tensor, 0) - img_grid = vutils.make_grid(output, nrow=self.opt.batchSize, padding=0, normalize=False) - - if self.opt.isTrain: - self.writer.add_image("Face_SPADE/training_samples", img_grid, step) - else: - vutils.save_image( - output, - os.path.join(self.log_dir, str(step) + ".png"), - nrow=self.opt.batchSize, - padding=0, - normalize=False, - ) - - # errors: dictionary of error labels and values - def plot_current_errors(self, errors, step): - if self.tf_log: - for tag, value in errors.items(): - value = value.mean().float() - summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)]) - self.writer.add_summary(summary, step) - - if self.tensorboard_log: - - self.writer.add_scalar("Loss/GAN_Feat", errors["GAN_Feat"].mean().float(), step) - self.writer.add_scalar("Loss/VGG", errors["VGG"].mean().float(), step) - self.writer.add_scalars( - "Loss/GAN", - { - "G": errors["GAN"].mean().float(), - "D": (errors["D_Fake"].mean().float() + errors["D_real"].mean().float()) / 2, - }, - step, - ) - - # errors: same format as |errors| of plotCurrentErrors - def print_current_errors(self, epoch, i, errors, t): - message = "(epoch: %d, iters: %d, time: %.3f) " % (epoch, i, t) - for k, v in errors.items(): - v = v.mean().float() - message += "%s: %.3f " % (k, v) - - print(message) - with open(self.log_name, "a") as log_file: - log_file.write("%s\n" % message) - - def convert_visuals_to_numpy(self, visuals): - for key, t in visuals.items(): - tile = self.opt.batchSize > 8 - if "input_label" == key: - t = util.tensor2label(t, self.opt.label_nc + 2, tile=tile) ## B*H*W*C 0-255 numpy - else: - t = util.tensor2im(t, tile=tile) - visuals[key] = t - return visuals - - # save image to the disk - def save_images(self, webpage, visuals, image_path): - visuals = self.convert_visuals_to_numpy(visuals) - - image_dir = webpage.get_image_dir() - short_path = ntpath.basename(image_path[0]) - name = os.path.splitext(short_path)[0] - - webpage.add_header(name) - ims = [] - txts = [] - links = [] - - for label, image_numpy in visuals.items(): - image_name = os.path.join(label, "%s.png" % (name)) - save_path = 
os.path.join(image_dir, image_name) - util.save_image(image_numpy, save_path, create_dir=True) - - ims.append(image_name) - txts.append(label) - links.append(image_name) - webpage.add_images(ims, txts, links, width=self.win_size) diff --git a/spaces/matthoffner/web-llm-embed/src/utils/icon.ts b/spaces/matthoffner/web-llm-embed/src/utils/icon.ts deleted file mode 100644 index 76b0451c9ef2189a46f258ce4539f191fd93fb78..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/web-llm-embed/src/utils/icon.ts +++ /dev/null @@ -1 +0,0 @@ -export const SEND_ICON = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAYAAAD0eNT6AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAACAASURBVHic7d13nF11nf/x17kzEzoJHZQihC66lAgYEIzJzISmq0IAERZXBdfCruWxurpqcNe6v1UBV8W1UlSKsggCk0kMKFWqtNAJTUghmUAKmXLP749JTAKTZMq953PuOa/n4+EDmcw95/0IM/N5z/d77jkJkvIvJWEa29DE1vQxhmTF/6or/gmjSRhDSguwKQkbUWVDEjYFWlYcZWNgg/WcaTmwdMX/7yFlMQnLgFeAxST0kNIFLCKliwpdpCv+10QXfcynnbl1+BuQVGNJdACp9DrYlpQdqbAjCbuQsiOwAynbAK8jYRtgG6A5Nuig9QLzgLmkPA/MJeEFEp4l5SkSnqGHZzmaecE5pVKzAEj1dg0b0MSuwO5U2J2U3UkZS8JuwM7AhsEJo7wCPE3KEyQ8BjxGwmP08hgv8yRT6I4OKBWZBUCqlQ42AfamwhtJ2RfYl5R9SdgVqASnazR9pMwm4QFSHiThQVIepJtZHPe3LQpJI2ABkIZjJtvTzQEkHAAcABwI7IrfU/VWJeVJEu4i5W7gHkZxNxN4ITqY1Gj8YSWtz0zG0MshpBwCHEL/wN8hOJXW9DxwF/BnEm6jmduYQFd0KCnPLADS6vqvtn8jCYcDh9I/8PfC75VGkwIPAbcBt1LlRtp5kIQ0OJeUG/5QU7ldShObszcJh5EwCZgAbB0dS3XxEil/JmE6CTexBbcxjp7oUFIUC4DK51r2oplWUlqBI4HR0ZEUoouUG0jopEInk3gkOpCUJQuAim86W9FHKxVaSZlE/1vvpFd7CugkoZM+pjOZBdGBpHqyAKiYprMbVY4j5VgSjmTV3fCkwegD7iHlauAq2rkzOpBUaxYAFcNMmunhSOCdwLHAbsGJVCQpj5NwNSm/YxE3MIW+6EjSSFkA1LgupYkteCspJwAnAttFR1IpLAB+T8plLKLDOxaqUVkA1FiuYQNamEyVKSQcgxfwKVYXcDUpl1oG1GgsAMq/qVQ4jPErftM/mf4H40h5s5D+MnAZi7jGbQLlnQVA+dXJm6hyGgknATtGx5GG4BngV1S5gMk8EB1GGogFQPkykzF0M4WE04DDouNINfAgcAEt/IQJzI8OI61kAVC8qVQ4nDZSTifl74ENoiNJdfAKcAXwc25mOlOpRgdSuVkAFKeDbUn4AHAm/U/Sk8riOVJ+DHyfduZGh1E5WQCUvQ4OosIZpJwKbBQdRwrUDVxJhR8xkRk+rEhZsgAoGzezEUt4PylnAftFx5FyJ+VeEr5DF7/07YTKggVA9TWT7enmIyR8DJ+yJw3GHFJ+SBPnMYkXo8OouCwAqo/pvIUqnwLei/fhl4ZjMSk/o4nvMoknosOoeCwAqq1ODgc+S8qx0VGkgqiScA1Vvk47N0eHUXFYADRyU6nwVo4h4d+Bg6PjSAV2JwnnspCLvdOgRsoCoOG7gxYWcjopn8On70lZepSUb7AVFzKOnugwakwWAA1d/+A/mZQvArtHx5FK7CngO/TyQ45meXQYNRYLgAbvUkaxBSdR5UskjI2OI+lvnga+TQvnM4FXosOoMVgAtH4zaaab00j4MrBzdBxJazWblKks4iKvEdD6WAC0btOZRB//TcKbo6NIGrSHgC/RyuXeXVBrYwHQwKYziSpfB8ZFR5E0bH8GPk8bM6KDKH8sAFpTJ/uQ8l/AMdFRJNVIynSa+DSTuDc6ivLDAqB+M9maHr4IfAxoio4jqeaqwMW08K9M4IXoMIpnASi7q9iYDfgE8Hlg8+g4kupuCfA9WvhPJrA4OoziWADKrIPjSDgP2CU6iqTMPUfC52nlguggimEBKKNr2YsmzgHao6NICpYwkypn0c790VGULQtAmcxkDD18DvgkMCo6jqTc6AW+T8KXaGVRdBhlwwJQFh2cSMI5wHbRUSTl1vOknEU7l0cHUf1ZAIruOnagwveA90RHkdQgEq6mykdp55noKKof3+5VVCkJ4zmDCr8D9o+OI6mh7EnCP3IqyxjL7Vzv3QSLyBWAIrqO3WniR6RMiI4iqeHdRMKHaWVWdBDVlgWgSO6ghQV8Cjgb2CA6jqTC6AG+TRdfYgrd0WFUGxaAoujkAODHpBwYHUVSYd1Hyodp57boIBo5C0Cj62ATEr4KfAKoRMeRVHh9pJxHN1/gOJZGh9HwWQAa2TTGARcDe0ZHkVQyCQ9T5RTauTM6iobHAtCIUhI6OQv4Ft7QR1KcXlK+yiL+gyn0RYfR0FgAGk0HO5FwIXBkdBRJAvpvJ9zDaRzNs9FRNHjuGTeSDo4n4R4c/pLyJGUCzdxPJ++LjqLBcwWgEVzJZmzM/yPljOgokrQeF7KMj/EuXo4OonWzAORdB4eQcBGwe3QUSRqk2cD7aeOm6CBaOwtAXs2kmR6+CHweaI6OI0lD1At8hS6+5gWC+WQByKOZbE0PvwYmRkeRpBG6gQonMok50UG0JgtA3nRwEAm/AXaJjiJJNfIsKcd7B8F88V0AedLBGSTcjMNfUrHsSMINdPLh6CBaxRWAPLiGDWjme8CHoqNIUp1dyKacyXiWRQcpOwtAtP4b+1wOHBwdRZIykXAXvbyXo5gdHaXM3AKI1MnbSbgDh7+kMkk5kCZup4PW6ChlZgGIkJIwjc+SMh3YNjqOJAXYmoRrV/wsdDU6gH/pWet/fO8FwHuio0hSTlzGck738cLZcgUgSzPZnoTrcfgrh7YdtS0VfyQoxglswEyms110kDLxuz0r1/FGergVGBcdRXq1nTbciScOf4L7x99vCVCUg6lyC53sEx2kLPxOz8I0JlLhRnx/v3Jopw13Ytb4WWzStAn7
bLKPJUCRdiXlJq5jQnSQMvC7vN6mcTpwLTAmOIn0GqsP/5UsAQq2BRWuo5PTooMUnd/h9ZKS0MFU4GdAS3Aa6TUGGv4rWQIUbBQpP6eDqb5DoH78i62HSxnFGH4CvD86ijSQdQ3/1c1aMov9bt6PKtWMkkmv8Qu6OIMpdEcHKRoLQK1dzRaM4rfA26OjSAMZ7PBfyRKgHPgDLbyXCXRFBykSC0AtzWAX+rgO2Ds6ijSQoQ7/lSwByoEHSZlMO89EBykKC0CtdLArCTOAXaOjSAMZ7vBfyRKgHHgamEQbj0YHKQKv8KmFTvYh4UYc/sqpkQ5/8MJA5cLOpPyRTt4UHaQI/E4eqes4kJQ/Aq+LjiINpBbDfyVLgMIlbE/K9VznQ9RGyu/ikejkcCr8Adg6Ooo0kFoO/5UsAcqBLakwjQ7GRwdpZH4HD1cnbweuAUYHJ5EGVI/hv5IlQDkwmoRpPlJ4+PzuHY5OjiHlWlI2i44iDaSew38lS4ByYBMSrqKDv48O0oj8zh2qTqaQcgWwYXQUaSBZDP+VLAHKgQ1IuIRpnBAdpNH4XTsUHZxCysV4a1/lVJbDfyVLgHJgFPArOvlAdJBG4n0ABqv/oT4/wdKknIoY/qvzPgHKgSoJH6CVC6KDNAILwGB08h5SLgGao6NIA4ke/itZApQDfaScQjuXRAfJOwvA+nTSTsqVwAbRUaSB5GX4r2QJUA70kPIe2rk6OkieWQDWZRoTgavxgj/lVN6G/0qWAOXAMqocw2RmRgfJKwvA2kznrVSZBmwaHUUaSF6H/0qWAOXAUqpMZjJ/ig6SRxaAgUxjf+APwBbRUaSB5H34r2QJUA4sImUi7dwZHSRvLACv1sF+JFwPbBUdRRpIowz/lSwByoH5VHk7k3kgOkieWABWdx27U+GPwA7RUaSBNNrwX8kSoByYQx9HchQPRwfJCwvASjPYhT7+BOwUHUUaSKMO/5UsAcqB2aQcQTvPRAfJAwsAQCejSbkR2C86ijSQRh/+K1kClAMP0s3hHMvC6CDRvKvdHbSQ8hsc/sqpogx/8LbByoV9GcUVXOO9Xcr9XZiSsICfABOjo0gDKdLwX8kSoBw4kmZ+RlruVfByfwd2cjZwanQMaSBFHP4rWQKUAyfTyRejQ0Qqb/vp5AOk/DQ6hjSQIg//1XlNgIKlJJxe1ocHlbMAdPJ2Ujrof4SklCtlGf4rWQIUrAc4ijZmRAfJWvkKwHT2pcpNwJjoKNKrlW34r2QJULBFpBxOO/dHB8lSuQrATLanh1uBXaKjSK9W1uG/kiVAwWZT4VAmMSc6SFbKcwXOzWxED/+Hw185VPbhD14YqHBvoMrVdFCab8JyfKelJCzmYuCQ6CjSqzn8V7EEKNg4kvK8PbAc32Wd/Bvw7ugY0qs5/F/LEqBgJzCdT0eHyELxW840JgIdQFN0FGl1Dv9185oABeoDjqaNadFB6qnYBaCTnUm5E9g6Ooq0Oof/4FgCFOhF+hjHUcyODlIvxV1jm8mGK+7x7/BXrjj8B8/tAAXaiiYuKfIzA4r7XdXL/wDjomNIq3P4D50lQIEOpoVzo0PUSzG/ozr5J1L+MTqGtDqH//BZAhQm5Qw6+GB0jHoo3jUAHRxCwg1Q3GUbNR6Hf214TYCCvEKFI5jE7dFBaqlYBaCDbUm4E9gxOoq0ksO/tiwBCvI0vYzjaOZFB6mV4qynzaSZhEtw+CtHHP6153aAguxMMxdzaXHeUl6c76Aevgi8PTqGtJLDv34sAQrSyhZ8LjpErRRjC6CD8ST8EW/2o5xw+GfD7QAF6CXhbbRya3SQkWr8AjCTTenhLmCP6CgSOPyzZglQ5lIe5xUO4F28HB1lJBp//ayH7+PwV044/LPndoAylzCWjfhOdIyRauwVgA6OJ+Gy6BgSOPyjuRKgzKWcRDuXRMcYrsatzNewIwnnR8eQAMY0j+Hq/a92+AfaZ5N9OHfvc0ka/PcaNZCEH9LJztExhqsxC8BUKrRwAbBldBRpdPNoOg7s4M2bvTk6Sul9bKeP8cN9fmgJUFbGkHJho741sCFD83n+DYp5a0Y1ltHNo5l24DQOHn1wdBStcNDmB7HDBjvw+/m/j46ictiFDVjMRdwcHWSoGq8md3AQCTcDo6KjqNwc/vn2o2d/xEdmfYSUNDqKiq+HCoc12q2CG6sAdLAJCXcBe0ZHUbk5/BuDJUAZmsVyxnEcS6ODDFZjXQOQ8DUc/grm8G8cZ+x4htcEKCv7MIqzo0MMReN8V1zHwVS4mUa9bkGF4PBvTK4EKCN9wKG0cUd0kMFojBWAmTRT4Xwc/grk8G9crgQoI02k/IQ7aIkOMhiNUQB6+Dywf3QMlZfDv/FZApSJhDfzIp+JjjEY+f9OuJa9aOIeYMPoKConh3+xuB2gDCwn4QBamRUdZF3yvQIwlQpN/BiHv4I4/IvHlQBlYANSfkCa7y+yfO+pf56PAh+JjqFycvgXlzcLUgbewJM8x4XcFR1kbfLbTqbxOuABYEx0FJWPw78c3A5QnS2iiTcykeeigwwkv1sAKd/H4a8ADv/ycDtAdTaaXs6JDrE2+dwC6OAkEr4QHUPl4/AvH7cDVFcJ+3Iq93IhD0VHebX81d6ZjKGHh4DtoqOoXBz+5eZ2gOroryxjb97Fy9FBVpe/LYAepuLwV8Yc/nI7QHX0Ojbi36NDvFq+vtKv441UuBsa4y5KKgaHv1bnSoDqpJuUv6M9P1sB+VoBaOI8HP7KkMNfr+ZKgOpkFHBedIjV5eciwA5Ogsa4faKKweGvtfHCQNVFwm55uiAwHxW3g01IeAjYMTqKysHhr8FwO0B1MJsW9mECr0QHyccWQIXP4PBXRhz+Giy3A1QHb6CXf4kOAXlYAZjOdqQ8Sspm0VFUfA5/DYcrAaqphJdpZk8m8EJkjPgVgJSvOfyVBYe/hsuVANVUymb08OXoGLFfzdN5M1XuIk8XI6qQHP6qBVcCVEN9pOxPO/dHBYhdAajyXzj8VWcOf9WKKwGqoSYSvhkZIO6reBptQEfY+VUKDn/VgysBqpkq72AyMyNOHbMCkJIAXwk5t0rD4a96cSVANVPhGytmYuZilt8P4wTgkyHnVik4/FVv3ixINfJ6HudOLuKRrE+cfQG4lCY25FJgm8zPrVJw+CsrlgDVRMKbGMv5XJ/tnlL2WwCjOR3YJ/PzqhQc/sqa2wGqgf14KydnfdJsv2KvYQOaeQTYOdPzqhQc/orkhYEakZTH2Yp9GEdPVqfMdgWghX/E4a86cPgrmisBGpGEsbzIqdmeMit30MICHgHekNk5VQoOf+WJKwEagSdoYS8m0JvFybJbAVjAP+LwV405/JU3rgRoBHajh/dndbJsvkL7f/t/GNg1k/OpFBz+yjNXAjQsKY8zir2zWAXIZgVgIafj8FcNOfyVd64EaFgSxtLNKVmcqv73Aeh/3/8lwBZ1P5dKweGvRuF9AjQsCfsylu/X+74A9V8BGMMUYLe6n0el4PBXo3ElQMOwJ4fx7nqfpP4FIOEzdT+HSsHhr0ZlCdCQpXy23qeobwG
YzmRSDqzrOVQKDn81OkuAhugtdPCOep6gvgWgWv8Go+Jz+KsoLAEakqS+M7R+X4XXcTAVbqvb8VUKDn8VkW8R1KAlHEgrd9fj0PVbAaj4uF+NjMNfReVKgAYtrd8src9X3wxeTx9PAi11Ob4Kz+GvMnAlQIPQQxO7MpHnan3g+qwA9PJxHP4aJoe/ysKVAA1CC32cWY8D1/6r7mY2YjHPAFvV/NgqPIe/ysiVAK3HPDZlF8azrJYHrf0KwMucisNfw+DwV1m5EqD12IaXeV+tD1r7ApDwiZofU4Xn8FfZWQK0HmfV+oC1LQAdHAHsV9NjqvAc/lI/S4DWKuHNdDC+loesbQFI+KeaHk+F5/CX1mQJ0FolfKS2h6uVa9iGZp4BNqjZMVVoDn9p7bwwUANYTi87cTTzanGw2q0ANPEhHP4aJIe/tG6uBGgAG9DCP9TqYE01OcpUKuzML4AtanI8FZrDXxqcgzY/iB022IHfz/99dBTlRcpYdud7XD/ypaHarAAcThuwa02OpUJz+EtD40qA1pAwlvFMqMWhalMAqnygJsdRoTn8peGxBOhVTq/FQUb+1XQdW1LhOWDDkcdRUTn8pZHzwkCtsIwWXscEukZykJGvAFQ4GYe/1sHhL9WGKwFaYSO6mTLSg9RiC8Dlf62Vw1+qLUuAAEhGPntH9hXUwX4k3DfSEComh79UP24HiApvZBIPDv/lI1Oz9yOqWBz+Un25EiCqvH8kLx9+AZhKhYSTRnJyFZPDX8qGJaD0TiEd/n/84ReAQzkS2HHYr1chOfylbFkCSm1nOof/gKDhF4CEk4f9WhWSw1+KYQkotWHP4uF9tVzKKMbwV2Cr4Z5YxeLwl+J5YWApzWNLXs84eob6wuGtAGzBUTj8tYLDX8oHVwJKaRsW0DqcFw6vAKScOKzXqXAc/lK+WAJK6YThvGjoXyHXsAHNzAU2H84JVRwOfym/3A4olS662I4pdA/lRUNfAWiiFYd/6Tn8pXxzJaBUxjCGtw/1RUMvAAnvGfJrVCgOf6kxWAJKJOG9Q3/JUMykmR6eB7Ye6olUDA5/qfG4HVAK82lhBybQO9gXDG0FoIcjcfiXlsNfakyuBJTC1vRy+FBeMLQCkPKuIX2+CsPhLzU2S0ApvHMonzzUawCOHuLnqwAc/lIxWAIKrlqvAtD/6N+xQw6khubwl4rFElBgCWO5lr0G++mDLwAVjh1WIDUsh79UTJaAAmsa/KwefAFILQBl4vCXis0SUFAJxwz+UwdjOltRZQ7QNNxMahwOf6k8fItg4fTQwrZMoGt9nzi4FYA+WnH4l4LDXyoXVwIKp4Ve3jGYTxxcAagM70lDaiwOf6mcLAEFkw5uZg+uAKRMGlEY5Z7DXyo3S0ChTB7MJ62/AHSwN7DzSNMovxz+ksASUCBvYDq7re+T1l8AXP4vNId/vD8t/FN0hFypUmXWklnRMUrLElAQVdrW9ynrLwCD3EtQ43H4x7t8zuV8Y/Y3omPkSwrvuOMdloBAloBCWO/sXncBuJQm4IhapVF+OPzjXT7nct533/voTQf98K7SeKH7BUtAMEtAw3s7U9c949ddAEazPzC6lokUz+Efb+Xw70l7oqPkliUgniWgoW3J4ey3rk9Y3xaAv/0XjMM/nsN/8CwB8SwBDSxd9wxfdwGoWACKxOEfz+E/dJaAeJaABjXsApCSkHJ4zQMphMM/nsN/+CwB8SwBDelI0rX/B1t7AZjOfsDW9UikbDn84zn8R84SEM8S0HC2ZdraHw+8rhWAw+oSR5ly+Mdz+NeOJSCeJaDBJGtfyV9XAXhrXcIoMw7/eA7/2rMExLMENJRD1/YHay8AFQ6pSxRlwuEfz+FfP5aAeJaAhjHEAjCTMaTsWbc4qiuHfzyHf/1ZAuJZAhrCvsxkzEB/MHAB6ONQ8L9oI3L4x3P4Z8cSEM8SkHsJfYwb6A/WVQDUYBz+8Rz+2bMExLME5NxaZvrABSDhLXUNo5pz+Mdz+MexBMSzBOTYWq7pG7gApBxY1zCqKYd/PId/PEtAPEtATqUcMNCHX1sArmMHEraveyDVhMM/nsM/PywB8SwBufR6prPdqz/42gJQGbgpKH8c/vEc/vljCYhnCcihKn/36g+9tgC4/N8QHP7xHP75ZQmIZwnIndf8cv/aApC4ApB3Dv94Dv/8swTEswTkyACzfaAVgNcsEyg/HP7xHP6NwxIQzxKQG/u/+gNrFoCr2JiEXTOLoyFx+Mdz+DceS0A8S0AOpOzOzWy0+ofWLAAbsO9rPqZccPjHc/g3LktAPEtAuCZeWvMW/2sO+4R9M42jQXH4x3P4Nz5LQDxLQLDKmjN+zQKQWgDyxuEfz+FfHJaAeJaAQMm6CgAWgDxx+Mdz+BePJSCeJSBIuu4C8MYMo2gdHP7xHP7FZQmIZwkIkKw541cVgGvYANgl6zx6LYd/PId/8VkC4lkCMpYyljtoWfmvqwpAC7sBTRGZtMro5tF0HtTp8A902ZzLOPm+kx3+JfBC9wu03dXGY0sfi45SWmfseAbf3eu70THKopn5q37RX1UAquweEkd/M6oyisvefBlv2dynMUe5fM7lnHLfKfSmvdFRlJFnX3mWt93+NlcCAp2181l8abcvRccoh+ZVs35VAagwNiSMAKgkFS7a7yJat2qNjlJaLvuXl9sB8c4eezYf3+nj0TGKr7pq1q8qAKkFINI3dv8GJ2x3QnSM0nLZX24HxPvuXt9l4pYTo2MUWzJwAXALIMjkrSbzmTd8JjpGabnsr5XcDojVlDRx8ZsuZvtR20dHKa50oC2AxBWACNuN2o6f7fczr4IN4rK/Xs3tgFj+TKy7VxWAlATYKSpNmf1gnx/YdoM4/LU2loBYk7eazAde/4HoGEW188r/018AprENsGFUmrKauOVE3r3tu6NjlJLDX+tjCYj19d2/zpjmMdEximgTrmNLWLUF4G//GWtJWjhv7/OiY5SSF/xpsLwwMM62o7bly2O/HB2jqHYCC0CY9+/wfvbZZJ/oGKXjBX8aKi8MjPOxnT7GThs6nmouWb0AVCwAWUpI+PQun46OUTou+2u43A6I0ZK08ImdPhEdo4h2hlUXAe4YGqVkjtnmGN64qc9dypLL/hoptwNinLnjmV4LUHtrbAG8LjBI6Zz5+jOjI5SKy/6qFbcDsrd58+actP1J0TGKJWEHWFUAtguMUipbtGxB21Zt0TFKw2V/1ZrbAdmzANTctrBqC8ACkJEp201hVGVUdIxScPirXiwB2XrbmLfx+g1eHx2jOJL+mb9yBWDbwCilcuzWx0ZHKAX3/FVvXhOQnUpS4ZhtjomOURzpygIwlQoJW0fnKYNKUmH8mPHRMQrPPX9lxWsCsnPEmCOiIxTJtqQkFQ5mK6A5Ok0ZvHnTN7Nly5bRMQrNZX9lze2AbBy5xZHREYqkhQ62qDCKbaKTlMXBow+OjlBoLvsritsB9bfjhjt6HUAtpWxTocoW0TnKYs+N94yOUFgu+yua2wH1t8fGe0RHKI4KYyokeIeFjPjFWx8u+ysv3A6oL3+G1l
ATW1SoWgCysutGu0ZHKByX/ZU3bgfUz9iNx0ZHKI4+xlSouAWQFW9nWVsu+yuv3A6oj9HNo6MjFEfCGFcAMrRp06bREQrDZX/lndsBtefP0BpK+lcANo/OURabNG0SHaEQXPZXo3A7oLY2a94sOkJxpP0rAFaqjLhUPXIu+6vRuB1QOz1VS38NbVIhYePoFGWxuG9xdISG5rK/GpXbAbXhz9AaSti4AmwUnaMs/OIdPoe/Gp0lYORe7ns5OkJxpGzkCkCG5nbPjY7QkNzzV1F4TcDI+DO0hlI2rpBaALLiN/3QueevovGagOF7dOmj0RGKI+kvAG4BZMQv3qFx2V9F5XbA8PgztKY2qpCwYXSKsnhg8QPRERqGy/4qOrcDhqY37eWRJY9ExyiSjSr4KODM3Nh1IylpdIzcc9lfZeF2wODd/fLdXkhdW80WgAzN6Z5jg10Pl/1VNm4HDM4NC2+IjlA0TRWgKTpFmcxcODM6Qm657K+ycjtg/a5fcH10hKJprpBaALJ02ZzLoiPkksv+Kju3A9auq7eL6QumR8comqYKiQUgS9cvvJ7nlj8XHSNXXPaX+rkdMLDfzPkNy6vLo2MUTbNbABmrplUufeHS6Bi54bK/tCa3A17r1y/8OjpCETVVohOU0XnPnOdSNy77S2vjdsAq9y2+jxkLZkTHKKQK0BcdomyeXPYkl8+5PDpGKJf9pXVzO6DfN2d/07dP10dfhdQCEOFbs79V2i9ql/2lwSn7dsDsZbPdMq2f3gqJBSDC3S/fzQV/vSA6RuZc9peGpszbAf/66L/6i0L99LkFEOizj36WRb2LomNkxmV/aXjKuB3wp4V/Kv1WaZ31VgB/FQsyp3sOX378y9ExMuGyvzQyZdoOWF5dzkcf+mhpt0kz0mcBCHbu0+dy9byro2PUlcv+Um08+8qzTLhzQuFLwOce/Rz3L74/OkbR9VZIeSU6RZmlpHzwwQ/y/PLno6PUhcv+Um0VvQRcO/9aznn63GHlZgAAG+BJREFUnOgYZbCsQsLS6BRlN7d7Lu+77310V7ujo9SUy/5SfRS1BDy29DFOu/80l/6zsbRCwrLoFOq/RfBpD5xGNa1GR6kJl/2l+ipaCZjXPY9j7j6G+T3zo6OUQ8qyCqkrAHlxyQuX8MlHPhkdY8T8zV/KRlFKwOK+xRx191E8stTHpWcmYWkFXAHIk3OfPpePP/Txhl0J+Plff8777nufv/lLGVl5n4C7X747OsqwLOhZwOS7JnPnS3dGRymXxBWAXPqfZ/6H4+89nleqjXV95jdnf5MPPPABh7+UsRe6X+CI249g2ovToqMMyexlsxl/+3hu6ropOkr5pCytUGFxdA691hVzr6DtrraGeHTwkr4l/MP9/8DnHv1cdBSptBb3Lead97yT8589PzrKoFy/8Hre+ue38vCSh6OjlNWSCild0Sk0sD8t/BN/d8vfcdW8q6KjrNUDix/g0D8fygXPl++2xlLeLK8u5yOzPsJ7/vIeFvYsjI4zoL60j7OfOJtJd07ihe4XouOU2UILQM692PMi77rnXZw560xe7HkxOs7fLK8u52tPfo1xt43zhh1Szlwx9woOvO3A3N1k7C8v/4Uj7ziSqY9PpS/1LvTBuipULAB5l5Lyo2d/xB437cG5T58b/o0zfcF0Drj1AL7w2Bca7joFqSxmL5vNcfccx6Q7J/HgkgdDsyzsWci/PPwvjLttnPv9eZHS5QpAA1nYs5B/fvif2fOmPTn36XNZVs3uDRwpKVfPu5rDbz+c1jtbS/VQEqmRzVgwg/1v2Z8p907hjpfuyPTcc7vncvYTZzP2prGc8/Q5XiCcJwkLEzoYT4KVrAFtP2p7ztzxTE7e/mT22mSvupzj+eXPc9mcyzj/2fPDf4soqrat2ug4sCM6Rm5U0ypN05uiYxRSQsJRWx/FB173AY7Z5hg2qmxU83NU0yo3LLyBi164iIufv5jl1eU1P4dqoMohCZ3sQ4o/2RvcAZsdwPHbHc+RWxzJWzZ/C6Mqo4Z1nGpa5b7F93HDwhu4ct6V3LDwhvAth6KzAKzJApCNzZo3413bvIvJW03myC2OZMcNdxz2sRb2LOTGrhuZvmA6l8+5nL8u/2sNk6ouKuyVMJOt6WFedBbVzsZNG/OWzd/CPpvsw+4b784eG+/BtqO2ZeOmjdm0aVMqVFhaXcqSviXM657Hk8ue5NGlj/Lwkoe57aXbcnv1cFFZANZkAYix60a7Mm7zceyx8R7svvHu7LLhLoxuHs3mzZvTkrRQpcrCnoUs7VvKc8uf49Glj/Lo0kf5y8t/4f4l9zfszctKq4UtElISOlkOtETnkcrIArAmC4BUd8tpZaMKCSng0xckSSqHuSSklRX/Mic0iiRJyspcAAuAJEnlMgdWFoDUAiBJUkmsVgASng+NIkmSsvICrCoAz4ZGkSRJWXkaVm0BPB0aRZIkZSPhGVi1AvBMaBhJkpSV1VYAmi0AkiSVQvPqKwATmA8sjcwjSZLqbjET+p8CXFntg14IKElSsf3tmr9VBSDliZAokiQpK4+v/D+rCkDCYyFRJElSNlab9asXgMcH/GRJklQM6UArAFVXACRJKrQBVwCwAEiSVGiVgQrAIp4A+iLySJKkuuuhwlMr/2VVAZhCNymzIxJJkqS6e5wJ9K78l8oaf5TwYOZxJElSFtaY8WsWgJQHMo0iSZKyssaMX7MAVJiVaRRJkpSVNWb8mgWg6gqAJEmFVFnXCkA3s4BqlnkkSVLd9dHEI6t/YM0CcBxLwWcCSJJUMI8ygVdW/0BlgE+6J6MwkiQpG3e/+gMDFYDXfJIkSWpg6WAKwACfJEmSGtprVvcHWgG4M4MgkiQpK32DKQDtzAWezyKPJEmqu2c5mnmv/uBAKwAAd9U5jCRJysaAW/trKwB/rmMQSZKUlYTbBvrw2grArXWMIkmSspIOPNMHLgAt/BnvCChJUqOr0svtA/3BwAVgAl3Aw/VMJEmS6u4Bjualgf5gbVsA4DaAJEmNbS3L/7DuAjDgRQOSJKlhrHWWr70AVLmxLlEkSVI2mvjT2v5o7QWgnQeBufXII0mS6izlBSat+Qjg1a29ACSkwE31yCRJkuos4Y/r+uN1XQOw3hdLkqTcGkEBgBtqGESSJGUlXfcMX3cBWMi9QFct80iSpLp7kVt4cF2fsO4CMIW+9TUISZKUOzOZuu47+q5vCwASOmsWR5Ik1V+6/tm9/gJQsQBIktRQmpi+vk9ZfwHofw/h7BrEkSRJ9fcYk3hifZ+0/gLQb8YIw0iSpCwMYvkfBlsABnkwSZIUbJDX7g2lAPSOJI8kSaq7HhL+MJhPHFwBmMwC4OaRJJIkSXWWcgOtLBrMpw72GgBIuXrYgSRJUv1V+P3gP3Xwn2kBkCQpz9J6FIBWZgGPDSePJEmqu4do49HBfvLgC0C/QTcLSZKUqSHN6KEVgCpXDunzJUlSNlJ+N5RPH1oBeIk/AnOH9BpJklRvc1jETUN5wdAKwBT6YGgNQ5Ik1VnKb1fM6EEb6jUA/SeRJEn5kfCbob5k6
AVgK6YDC4f8OkmSVA8v0sINQ33R0AvAOHrw3QCSJOXF/zFh6LfrH3oBAEi5dFivkyRJtZVw2XBeNrwCsBXXAS8O67WSJKlW5tLMjOG8cHgFoH8b4PJhvVaSJNXKr4ez/A/DLQD9fjWC10qSpJFKhj+Lh18AWvkjMHvYr5ckSSPxBJO4bbgvHn4BSEjBiwElSQqR8qsVs3hYRrIFAFUuGNHrJUnS8FS4eGQvH4nJPADcMaJjSJKkobqZVmaN5AAjKwAAKT8b8TEkSdJQjHj2jrwA9C9BLBvxcSRJ0mAso2Xkb8UfeQFoZRFw5YiPI0mS1i/lMibQNdLDjLwA9HMbQJKkLKT8vBaHqU0BaKUTeKwmx5IkSWvzGO1cX4sD1aYA9L8P8cc1OZYkSVqbH4zkvf+rq9UWALTwE+CVmh1PkiSt7hUq/KJWB6tdAZjAfFJ+W7PjSZKkVVIuZVLtnsRbuwLQf7Qf1PR4kiSpXxM/rOXhalsAWrmRlHtrekxJknQPk7illgesbQEASPhOzY8pSVK5fbvWB6x9AdiSi4Fna35cSZLK6Tm6uKTWB619ARhHDynfr/lxJUkqp3OZQnetD1r7AgDQww+BxXU5tiRJZZHwMi38qB6Hrk8BOJaFwE/rcmxJksoi5X9rcd//gdSnAAD08R2gt27HlySp2Hpp4tx6Hbx+BeAoZgNX1O34kiQVWcqlTOSpeh2+fgWg/+j/VdfjS5JUVCnn1PPw9S0Ak7gduLGu55AkqWgSZjKZP9fzFPUtAAAp/133c0iSVCx1n531LwC38Dvg0bqfR5KkYpjFJK6p90nqXwCmUiXlm3U/jyRJxfB1EtJ6n6T+BQBgKy4AnsjkXJIkNa7HaOFXWZwomwIwjh7g65mcS5KkRpXwH0zI5h462RQAgC35BfBkZueTJKmRpDxOM7/M6nTZFYD+hwR9I7PzSZLUWDL77R+yLAAAi/g5MDvTc0qSlH+PMoqLszxhtgVgCt2kTM30nJIk5V3Kl7L87R+yLgAAi7gIeCDz80qSlE/3cQuXZn3S7AvAFPqAL2Z+XkmS8ulzTKWa9UmzLwAAbVxByi0h55YkKT9uoq3+d/0bSEwBAEhcBZAklVzK56NOHVcA2phBynVh55ckKdbvaOePUSePKwD9Z/8UZHvVoyRJOdBLhX+LDBBbAFqZRcJPQzNIkpS1lB8wiQcjI8QWAIAqXwReio4hSVJGuhjFV6JDxBeAduaS8K3oGJIkZSLhq0xgfnSM+AIAsAnfBp6KjiFJUl2lPE4P50XHgLwUgPEsI+Uz0TEkSaqzT3I0y6NDQF4KAEA7lwMd0TEkSaqTabRzVXSIlfJTAAASPgn0RMeQJKnGuunjrOgQq8tXAWhlFuRjb0SSpBr6b47i4egQq8tXAQDo5Wzg+egYkiTVyLO08LXoEK+WvwJwNC8B/xwdQ5Kkmkj5BBNYHB3j1fJXAADauIyE/4uOIUnSCF1Kez7nWT4LAEAfHwW6omNIkjRMi4BPRodYm/wWgMk8D7EPSpAkadhSPkUbf42OsTb5LQAArZwP/CE6hiRJQ3Q9bfwsOsS65LsAJKRU+CfglegokiQN0jKqfJiENDrIuuS7AABM4hFSvhodQ5KkQZrKZB6LDrE++S8AAKP4BnB3dAxJktbjL2zJd6JDDEZjFIAJ9FLhTKAvOookSWvRS8oHGdcYt7RvjAIAMInbSb1NsCQpt75NO3dGhxisxikAAN18gSRf91KWJAl4kE2ZGh1iKBqrABzHUvp4H9AdHUWSpBWWA6cwnmXRQYaisQoAwGTuIuHL0TEkSVrh87RxT3SIoWq8AgBwE9/CGwRJkuJ1cjPfjQ4xHI1ZAKZSpYnTgAXRUSRJpbWQlA8ylWp0kOFozAIAMJHnSPhwdAxJUmmdSTvPRIcYrsYtAACt/Bb4eXQMSVLpnE8bl0WHGInGLgAAKR8HHomOIUkqjcdo4TPRIUaq8QtAO0uAU6Ax7rwkSWpoPVQ5hQksjg4yUo1fAADauAP4z+gYkqTC+yKT+XN0iFooRgEA6OKrpEyPjiFJKqxruZn/ig5RK8UpAFPoI+VE4MnoKJKkwnmKFk5r1Lf8DaQ4BQBgMguA90Bj3Y5RkpRrr5DyXiYwPzpILRWrAAArbsd4ZnQMSVJBJHy0kZ7yN1jFKwAAbVxIwo+iY0iSGt73aOVn0SHqoZgFAGALPg7cFB1DktSwbqWLT0eHqJfiFoBx9JByMjA3OookqeHMoYnjmVLcx88XtwAAtPMMVU4CeqOjSJIaRi9wIhN5LjpIPRW7AABMZiYpn4+OIUlqGJ+hjRuiQ9Rb8QsAQBv/Dxr7oQ2SpEz8ijbOiQ6RhXIUgISUTfkHUm6JjiK9Wne1sFuMw7I8XR4dQeV1O8v5UHSIrJSjAACMZxmjeCfwWHQUaXUv970cHSFXXu7170MhniTlWI5jaXSQrJSnAABMYD4pxwELo6NIKznw1vRS70vREVQ+C+jjKNrL9a6xchUAgHYeIuXvAdcZlQsLe+2jq+vq7YqOoHLppsrxHMXD0UGyVr4CANDOH0k5HUijo0jzuuc59Fbz6NJHoyOoPFLgg0xmZnSQCOUsAADt/JqEs6NjSACPLHkkOkJuPLykdL+IKUrKF2jjougYUcpbAAAm8RXg59ExpIeWPhQdITf8u1AmEn5KO1+PjhGp3AUgIWVLziBlenQUldutXbdGR8gN/y5UdwkzWcg/RceIVu4CAP3PDOjjvcB90VFUXjMXlnIL8jUeW/oYT73yVHQMFdsDNPOeIt/jf7AsAABH8xIpxwCzo6OonB5a8hDPLS/0bccHZcaCGdERVGxP0EQ7E/CqWywAq7TzDE1MAv4aHUXldNW8q6IjhLt6/tXREVRcz5EyqegP+BkKC8DqJvI4FSYAc6KjqHwufP7C6Aih5nbPpWN+R3QMFdM8Elpp58noIHliAXi1STxClXZgQXQUlcvNXTeX+i1wv3zhl/SkPdExVDyLqDKZVmZFB8kbC8BAJvMXEo4hwXu0KlP/+9z/RkcIUU2r/Pi5H0fHUPEsIeFYJnNXdJA8sgCsTSu3UuXvgWXRUVQeP3z2h8zvmR8dI3P/N+//eGDxA9ExVCzLSDiWVm6MDpJXFoB1aecP4HMDlJ0lfUs47+nzomNk7utPlvp+LKq9bhJOoJXro4PkmQVgfdqYBpwM9EZHUTmc8/Q5vND9QnSMzFw+53LueOmO6Bgqjj4STqWV30cHyTsLwGC0cQUJHwSq0VFUfIt6F/Gvj/xrdIxMLO1bymce+Ux0DBVHFTidVi6NDtIILACD1coFJJyKKwHKwIXPX8gfFvwhOkbdffnxL3vnP9VKH/DBMj/cZ6iS6AANZxrvBC4FNoiOomIbu9FY7jz0TkY3j46OUhe3LrqVI24/wrf+qRa6SXgfrfwmOkgjcQVgqNr4HSnvxncHqM4eX/Y4H3rwQ9Ex6mJBzwJOuvckh79qYSkV3uXwH7qm6AAN6SIe4/38iQrH40qA6ujBJQ+yZcuWHDL6kOgoNdOX9nHCvSd44Z9qYQnwLlrpjA7SiFwB
GK52/kjKO4AXo6Oo2D71yKe4fM7l0TFq5pMPf5Jr518bHUONr4sKrbThE6SGyQIwEm3cQUIrMC86ioqrL+3jlPtPofPFxv8l58uPf5nzninffQ5Uc3Op8nYmcUt0kEZmARipVu4m5QjwCVOqn+5qN8ffe3xDvzPgK098ha888ZXoGGp8z1PlHUzmL9FBGp3vAqiVa3kDFaaTMDY6ioprVGUUF7zxAk7c/sToKIOWkvLpRz7Nd576TnQUNb7ZNDGJiTweHaQIvAiwVi6mi1O5goQ2YNvoOCqmvrSP3877LRtWNuSwMYeR5LzDd/V2cdK9J/GLv/4iOooaXcq9NDORiXjjiBrJ90+PRnQlm7ERlwBHRUdRsU3cciIXv+lithu1XXSUAd350p2ceO+JPL7MX9Y0QinTqXA8rSyKjlIkXgNQa+/iZVp4J3B+dBQV24wFMzjo1oO4Yu4V0VHW8Er1Fb7yxFcYf/t4h79q4WdsxdEO/9pzBaCepvHPwHfw71l19o4t38H39/4+e22yV2iOmQtm8rGHPsasJbNCc6gQUlK+QjtTo4MUlYOp3qZxAnABsGF0FBXbqMooTtruJL6w2xfYc+M9Mz33TV038c3Z3+SqeVdlel4V1nJSPkg7F0cHKTILQBY6GE/ClcDW0VFUfM1JMydufyIfev2HOGLMEVSS+uz0Le1bym/n/pYfPPsDbu66uS7nUCktAN5DGzdEByk6C0BWrmN3KlwD7BEdReWxy4a7cMoOp3DU1kdxyOhDaElaRnS8rt4ublh4A1fMvYLfzv0tL/e+XKOkEgBPkHIM7TwUHaQMLABZms5WVLkSOCw6ispn06ZNedsWb2Pc5uPYe5O92Wvjvdhlo13YuuW1C1M9aQ/zu+fz8NKHeWTJIzy09CFuXHgjd718F31pX0B6lcBtpLyTduZGBykLC0DWrmJjNuDnwAnRUaSVNm3alE2bNqUn7WFx32KWV5dHR1K5/IpN+SDjfcpqliwAUTo4g4TvASNbk5WkxtUL/DttfDM6SBlZACJ1cARwCQnbR0eRpIzNA072aX5xLADRZvB6+rgcODQ6iiRl5A4S3ksrT0cHKTPvBBhtIs/Ry9uBc6OjSFLdJfyILg5z+MdzBSBPpvF++m8hvHF0FEmqsVdI+Rjt/DQ6iPpZAPKmkwNI+Q2wa3QUSaqRp6lwPJO4PTqIVnELIG9auZsKbwGmRUeRpBq4lioHOPzzxwKQR5N4kS6OBr4A9ETHkaRh6AY+x80cy2QWRIfRa7kFkHfTeQtVLgKyfbqLJA1XwsNUOYV27oyOorVzBSDvJnE7m7I/vktAUmO4kGbGOfzzzxWARtLJe0j5EbBVdBRJepUuUj5CO5dEB9HgWAAazUy2p4efAkdFR5GkFWbQxD8wkeeig2jw3AJoNBN4gVaOAf4F8IktkiL1kHI2N9Pm8G88rgA0sg72I+GXwJuio0gqnVkknEIrd0cH0fC4AtDI2rmf5RwKfBvwIe2SstBLwn+xKQc5/BubKwBFMY39gf8FxkVHkVRQKffSxIe8qU8xuAJQFG3cQwtvpf/agCXRcSQVyjJSzmYRb3H4F4crAEU0g7H0cT4wMTqKpAaX8Cd6+TBH8XB0FNWWBaCoUhKmcyop38b7Bkgaui5SPksb/0tCGh1GtWcBKLr++wZ8Czg1OoqkBpFwNRU+4lv7is0CUBbTeDfwPeB10VEk5dazwMdo43fRQVR/XgRYFm1cQcqepJyNNxCStKYe4FyWsa/DvzxcASijaewBfBc4OjqKpHAzqHAWk3gwOoiyZQEosw6OI+EcYNfoKJIylvI4Cf9GG5dFR1EMtwDKrJ2r6GJv+u8dsCg6jqRMLCblbEaxn8O/3FwBUL/pbEWVLwEfBZqj40iquV4SfkqVL9LO3OgwimcB0JquZS+a+A/ghOgokmokZTrwSdq5PzqK8sMCoIF1MJ6ErwNHREeRNGy3UuXzTGZmdBDljwVA6zadSVT5FnBAdBRJg/YAcLZ7/FoXLwLUuk1iOjczjv47CT4RHUfSOj0KvI+bebPDX+vjCoAG7w5aWMjJpPw7sEd0HEl/M5uUrzOKnzKB3ugwagwWAA3dVCqM573AfwJ7RseRSuxJUr7h4NdwWAA0fDNppptTSPg8FgEpS7OAr9PFL5lCX3QYNSYLgEZuKhXeyjErisCh0XGkArsJOIcufuvg10hZAFRbnRwOfJaUY/DrS6qFKgnXAF+llVujw6g4/AGt+ujkAFI+CZwIjIqOIzWchJeBH1PhHCbyVHQcFY8FQPU1ne2ocjpwFvC64DRSI5hDyg9JOZfJLIgOo+KyACgbM9mQbk4BziLhzdFxpBy6h4TvsJBfM4Xu6DAqPguAstfBQVQ4g5T3AxtHx5ECLQd+R4UfMYnp0WFULhYAxZnJGLqZQsJZwBuj40gZehT4CS38hAnMjw6jcrIAKF5KQifvAE4H3gtsFBtIqoulwG+o8jPauZ6ENDqQys0CoHy5hs1p4e+pcioJE/FrVI3vTuBCqlzoRX3KE3+4Kr+msy99nErCycAu0XGkIXgS+BUpF9LOQ9FhpIFYANQYOjiIhNPov6/AdtFxpAEsAH5PhQuYyAyX+JV3FgA1lpk000MbcALwTmDL4EQqtxdJuBK4lGZm+EAeNRILgBrXpTSxBW8l5QT6C8EO0ZFUCi8C15ByGVtxHePoiQ4kDYcFQMXQXwbeBryTlGOBPaIjqVAeAa4m5UoWcZMP4lERWABUTNPZjT4mUeE4UtrweQQamj7gVuAqKlzFJB6MDiTVmgVAxTeTMfQykZRWoBXYLTqScijlcWAaCZ0k/IFWFkVHkurJAqDymcFY+v5WBt6OFxKW1YvATGA6FTqZxBPRgaQsWQCkldsFCYfTXwh2Ck6k+phLwp9JuZGU6dzC3UylGh1KimIBkF6tk31IOYyEt5JyCLAPUImOpSGpAg8Ct5FyC1Vu5Cgejg4l5YkFQFqfa9icURxMH4dS4RBSDgBeHx1La3gWuJuUP5NwC73cztG8FB1KyjMLgDQcHWxLwv7AASQcsKIUjAWagpMVXR8Jj5FyN3APKXfRxz0czbzoYFKjsQBItTKTDelhb/q3DPYD9l7xz92A5shoDaiHhCdIeYCUWcD9pMyiykMczfLocFIRWACkeruDFuazC83sTpWxpOxOhd1J2Y3+hxxtEh0xyBLgKeDxFb/V9/+zwmNUeMrb6kr1ZQGQol3HljSzI33sDOwM7ETCDsA2JGxPynbANjTOzYy6gXkkzCHlBWAe8FdSngWepomneYVnOJaFsTGlcrMASI3iOrYkZRsqjKHCGKqMIWELEsaQMgYYTUoTCWNIaKHKpiRsBGy44gjNwGbrOcvL8LffvF8hZRkVFpPSQ0oX0EvCS8BCoIuULip0UV3xvz7mOtilxvD/AdNdiuyj84QBAAAAAElFTkSuQmCC"; \ No newline at end of file diff --git a/spaces/merle/PROTEIN_GENERATOR/model/Track_module.py 
b/spaces/merle/PROTEIN_GENERATOR/model/Track_module.py deleted file mode 100644 index e710dd3763b9eb6475fc609a1d26ee19063bc13a..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/model/Track_module.py +++ /dev/null @@ -1,476 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from opt_einsum import contract as einsum -import torch.utils.checkpoint as checkpoint -from util import cross_product_matrix -from util_module import * -from Attention_module import * -from SE3_network import SE3TransformerWrapper -from icecream import ic - -# Components for three-track blocks -# 1. MSA -> MSA update (biased attention. bias from pair & structure) -# 2. Pair -> Pair update (biased attention. bias from structure) -# 3. MSA -> Pair update (extract coevolution signal) -# 4. Str -> Str update (node from MSA, edge from Pair) - -# Update MSA with biased self-attention. bias from Pair & Str -class MSAPairStr2MSA(nn.Module): - def __init__(self, d_msa=256, d_pair=128, n_head=8, d_state=16, - d_hidden=32, p_drop=0.15, use_global_attn=False): - super(MSAPairStr2MSA, self).__init__() - self.norm_pair = nn.LayerNorm(d_pair) - self.proj_pair = nn.Linear(d_pair+36, d_pair) - self.norm_state = nn.LayerNorm(d_state) - self.proj_state = nn.Linear(d_state, d_msa) - self.drop_row = Dropout(broadcast_dim=1, p_drop=p_drop) - self.row_attn = MSARowAttentionWithBias(d_msa=d_msa, d_pair=d_pair, - n_head=n_head, d_hidden=d_hidden) - if use_global_attn: - self.col_attn = MSAColGlobalAttention(d_msa=d_msa, n_head=n_head, d_hidden=d_hidden) - else: - self.col_attn = MSAColAttention(d_msa=d_msa, n_head=n_head, d_hidden=d_hidden) - self.ff = FeedForwardLayer(d_msa, 4, p_drop=p_drop) - - # Do proper initialization - self.reset_parameter() - - def reset_parameter(self): - # initialize weights to normal distrib - self.proj_pair = init_lecun_normal(self.proj_pair) - self.proj_state = init_lecun_normal(self.proj_state) - - # initialize bias to zeros - nn.init.zeros_(self.proj_pair.bias) - nn.init.zeros_(self.proj_state.bias) - - def forward(self, msa, pair, rbf_feat, state): - ''' - Inputs: - - msa: MSA feature (B, N, L, d_msa) - - pair: Pair feature (B, L, L, d_pair) - - rbf_feat: Ca-Ca distance feature calculated from xyz coordinates (B, L, L, 36) - - xyz: xyz coordinates (B, L, n_atom, 3) - - state: updated node features after SE(3)-Transformer layer (B, L, d_state) - Output: - - msa: Updated MSA feature (B, N, L, d_msa) - ''' - B, N, L = msa.shape[:3] - - # prepare input bias feature by combining pair & coordinate info - pair = self.norm_pair(pair) - pair = torch.cat((pair, rbf_feat), dim=-1) - pair = self.proj_pair(pair) # (B, L, L, d_pair) - # - # update query sequence feature (first sequence in the MSA) with feedbacks (state) from SE3 - state = self.norm_state(state) - state = self.proj_state(state).reshape(B, 1, L, -1) - - msa = msa.index_add(1, torch.tensor([0,], device=state.device), state.type(torch.float32)) - # - # Apply row/column attention to msa & transform - msa = msa + self.drop_row(self.row_attn(msa, pair)) - msa = msa + self.col_attn(msa) - msa = msa + self.ff(msa) - - return msa - -class PairStr2Pair(nn.Module): - def __init__(self, d_pair=128, n_head=4, d_hidden=32, d_rbf=36, p_drop=0.15): - super(PairStr2Pair, self).__init__() - - self.emb_rbf = nn.Linear(d_rbf, d_hidden) - self.proj_rbf = nn.Linear(d_hidden, d_pair) - - self.drop_row = Dropout(broadcast_dim=1, p_drop=p_drop) - self.drop_col = Dropout(broadcast_dim=2, p_drop=p_drop) - - self.row_attn = 
BiasedAxialAttention(d_pair, d_pair, n_head, d_hidden, p_drop=p_drop, is_row=True) - self.col_attn = BiasedAxialAttention(d_pair, d_pair, n_head, d_hidden, p_drop=p_drop, is_row=False) - - self.ff = FeedForwardLayer(d_pair, 2) - - self.reset_parameter() - - def reset_parameter(self): - nn.init.kaiming_normal_(self.emb_rbf.weight, nonlinearity='relu') - nn.init.zeros_(self.emb_rbf.bias) - - self.proj_rbf = init_lecun_normal(self.proj_rbf) - nn.init.zeros_(self.proj_rbf.bias) - - def forward(self, pair, rbf_feat): - B, L = pair.shape[:2] - - rbf_feat = self.proj_rbf(F.relu_(self.emb_rbf(rbf_feat))) - - pair = pair + self.drop_row(self.row_attn(pair, rbf_feat)) - pair = pair + self.drop_col(self.col_attn(pair, rbf_feat)) - pair = pair + self.ff(pair) - return pair - -class MSA2Pair(nn.Module): - def __init__(self, d_msa=256, d_pair=128, d_hidden=32, p_drop=0.15): - super(MSA2Pair, self).__init__() - self.norm = nn.LayerNorm(d_msa) - self.proj_left = nn.Linear(d_msa, d_hidden) - self.proj_right = nn.Linear(d_msa, d_hidden) - self.proj_out = nn.Linear(d_hidden*d_hidden, d_pair) - - self.reset_parameter() - - def reset_parameter(self): - # normal initialization - self.proj_left = init_lecun_normal(self.proj_left) - self.proj_right = init_lecun_normal(self.proj_right) - nn.init.zeros_(self.proj_left.bias) - nn.init.zeros_(self.proj_right.bias) - - # zero initialize output - nn.init.zeros_(self.proj_out.weight) - nn.init.zeros_(self.proj_out.bias) - - def forward(self, msa, pair): - B, N, L = msa.shape[:3] - msa = self.norm(msa) - left = self.proj_left(msa) - right = self.proj_right(msa) - right = right / float(N) - out = einsum('bsli,bsmj->blmij', left, right).reshape(B, L, L, -1) - out = self.proj_out(out) - - pair = pair + out - - return pair - -class SCPred(nn.Module): - def __init__(self, d_msa=256, d_state=32, d_hidden=128, p_drop=0.15): - super(SCPred, self).__init__() - self.norm_s0 = nn.LayerNorm(d_msa) - self.norm_si = nn.LayerNorm(d_state) - self.linear_s0 = nn.Linear(d_msa, d_hidden) - self.linear_si = nn.Linear(d_state, d_hidden) - - # ResNet layers - self.linear_1 = nn.Linear(d_hidden, d_hidden) - self.linear_2 = nn.Linear(d_hidden, d_hidden) - self.linear_3 = nn.Linear(d_hidden, d_hidden) - self.linear_4 = nn.Linear(d_hidden, d_hidden) - - # Final outputs - self.linear_out = nn.Linear(d_hidden, 20) - - self.reset_parameter() - - def reset_parameter(self): - # normal initialization - self.linear_s0 = init_lecun_normal(self.linear_s0) - self.linear_si = init_lecun_normal(self.linear_si) - self.linear_out = init_lecun_normal(self.linear_out) - nn.init.zeros_(self.linear_s0.bias) - nn.init.zeros_(self.linear_si.bias) - nn.init.zeros_(self.linear_out.bias) - - # right before relu activation: He initializer (kaiming normal) - nn.init.kaiming_normal_(self.linear_1.weight, nonlinearity='relu') - nn.init.zeros_(self.linear_1.bias) - nn.init.kaiming_normal_(self.linear_3.weight, nonlinearity='relu') - nn.init.zeros_(self.linear_3.bias) - - # right before residual connection: zero initialize - nn.init.zeros_(self.linear_2.weight) - nn.init.zeros_(self.linear_2.bias) - nn.init.zeros_(self.linear_4.weight) - nn.init.zeros_(self.linear_4.bias) - - def forward(self, seq, state): - ''' - Predict side-chain torsion angles along with backbone torsions - Inputs: - - seq: hidden embeddings corresponding to query sequence (B, L, d_msa) - - state: state feature (output l0 feature) from previous SE3 layer (B, L, d_state) - Outputs: - - si: predicted torsion angles (phi, psi, omega, chi1~4 with cos/sin, Cb 
bend, Cb twist, CG) (B, L, 10, 2) - ''' - B, L = seq.shape[:2] - seq = self.norm_s0(seq) - state = self.norm_si(state) - si = self.linear_s0(seq) + self.linear_si(state) - - si = si + self.linear_2(F.relu_(self.linear_1(F.relu_(si)))) - si = si + self.linear_4(F.relu_(self.linear_3(F.relu_(si)))) - - si = self.linear_out(F.relu_(si)) - return si.view(B, L, 10, 2) - - -class Str2Str(nn.Module): - def __init__(self, d_msa=256, d_pair=128, d_state=16, - SE3_param={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32}, p_drop=0.1): - super(Str2Str, self).__init__() - - # initial node & pair feature process - self.norm_msa = nn.LayerNorm(d_msa) - self.norm_pair = nn.LayerNorm(d_pair) - self.norm_state = nn.LayerNorm(d_state) - - self.embed_x = nn.Linear(d_msa+d_state, SE3_param['l0_in_features']) - self.embed_e1 = nn.Linear(d_pair, SE3_param['num_edge_features']) - self.embed_e2 = nn.Linear(SE3_param['num_edge_features']+36+1, SE3_param['num_edge_features']) - - self.norm_node = nn.LayerNorm(SE3_param['l0_in_features']) - self.norm_edge1 = nn.LayerNorm(SE3_param['num_edge_features']) - self.norm_edge2 = nn.LayerNorm(SE3_param['num_edge_features']) - - self.se3 = SE3TransformerWrapper(**SE3_param) - self.sc_predictor = SCPred(d_msa=d_msa, d_state=SE3_param['l0_out_features'], - p_drop=p_drop) - - self.reset_parameter() - - def reset_parameter(self): - # initialize weights to normal distribution - self.embed_x = init_lecun_normal(self.embed_x) - self.embed_e1 = init_lecun_normal(self.embed_e1) - self.embed_e2 = init_lecun_normal(self.embed_e2) - - # initialize bias to zeros - nn.init.zeros_(self.embed_x.bias) - nn.init.zeros_(self.embed_e1.bias) - nn.init.zeros_(self.embed_e2.bias) - - @torch.cuda.amp.autocast(enabled=False) - def forward(self, msa, pair, R_in, T_in, xyz, state, idx, top_k=64, eps=1e-5): - B, N, L = msa.shape[:3] - - state = state.type(torch.float32) - mas = msa.type(torch.float32) - pair = pair.type(torch.float32) - R_in = R_in.type(torch.float32) - T_in = T_in.type(torch.float32) - xyz = xyz.type(torch.float32) - - #ic(msa.dtype) - #ic(pair.dtype) - #ic(R_in.dtype) - #ic(T_in.dtype) - #ic(xyz.dtype) - #ic(state.dtype) - #ic(idx.dtype) - - - # process msa & pair features - node = self.norm_msa(msa[:,0]) - pair = self.norm_pair(pair) - state = self.norm_state(state) - - node = torch.cat((node, state), dim=-1) - node = self.norm_node(self.embed_x(node)) - pair = self.norm_edge1(self.embed_e1(pair)) - - neighbor = get_seqsep(idx) - rbf_feat = rbf(torch.cdist(xyz[:,:,1], xyz[:,:,1])) - pair = torch.cat((pair, rbf_feat, neighbor), dim=-1) - pair = self.norm_edge2(self.embed_e2(pair)) - - # define graph - if top_k != 0: - G, edge_feats = make_topk_graph(xyz[:,:,1,:], pair, idx, top_k=top_k) - else: - G, edge_feats = make_full_graph(xyz[:,:,1,:], pair, idx, top_k=top_k) - l1_feats = xyz - xyz[:,:,1,:].unsqueeze(2) - l1_feats = l1_feats.reshape(B*L, -1, 3) - - # apply SE(3) Transformer & update coordinates - shift = self.se3(G, node.reshape(B*L, -1, 1), l1_feats, edge_feats) - - state = shift['0'].reshape(B, L, -1) # (B, L, C) - - offset = shift['1'].reshape(B, L, 2, 3) - delTi = offset[:,:,0,:] / 10.0 # translation - R = offset[:,:,1,:] / 100.0 # rotation - - Qnorm = torch.sqrt( 1 + torch.sum(R*R, dim=-1) ) - qA, qB, qC, qD = 1/Qnorm, R[:,:,0]/Qnorm, R[:,:,1]/Qnorm, R[:,:,2]/Qnorm - - delRi = torch.zeros((B,L,3,3), device=xyz.device) - delRi[:,:,0,0] = qA*qA+qB*qB-qC*qC-qD*qD - delRi[:,:,0,1] = 2*qB*qC - 2*qA*qD - delRi[:,:,0,2] = 2*qB*qD + 2*qA*qC - delRi[:,:,1,0] = 
2*qB*qC + 2*qA*qD - delRi[:,:,1,1] = qA*qA-qB*qB+qC*qC-qD*qD - delRi[:,:,1,2] = 2*qC*qD - 2*qA*qB - delRi[:,:,2,0] = 2*qB*qD - 2*qA*qC - delRi[:,:,2,1] = 2*qC*qD + 2*qA*qB - delRi[:,:,2,2] = qA*qA-qB*qB-qC*qC+qD*qD - # - ## convert vector to rotation matrix - #R_angle = torch.norm(R, dim=-1, keepdim=True) # (B, L, 1) - #cos_angle = torch.cos(R_angle).unsqueeze(2) # (B, L, 1, 1) - #sin_angle = torch.sin(R_angle).unsqueeze(2) # (B, L, 1, 1) - #R_vector = R / (R_angle+eps) # (B, L, 3) - - #delRi = cos_angle*torch.eye(3, device=R.device).reshape(1,1,3,3) \ - # + sin_angle*cross_product_matrix(R_vector) \ - # + (1.0-cos_angle)*einsum('bni,bnj->bnij', R_vector, R_vector) - - Ri = einsum('bnij,bnjk->bnik', delRi, R_in) - Ti = delTi + T_in #einsum('bnij,bnj->bni', delRi, T_in) + delTi - - alpha = self.sc_predictor(msa[:,0], state) - return Ri, Ti, state, alpha - -class IterBlock(nn.Module): - def __init__(self, d_msa=256, d_pair=128, - n_head_msa=8, n_head_pair=4, - use_global_attn=False, - d_hidden=32, d_hidden_msa=None, p_drop=0.15, - SE3_param={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32}): - super(IterBlock, self).__init__() - if d_hidden_msa == None: - d_hidden_msa = d_hidden - - self.msa2msa = MSAPairStr2MSA(d_msa=d_msa, d_pair=d_pair, - n_head=n_head_msa, - d_state=SE3_param['l0_out_features'], - use_global_attn=use_global_attn, - d_hidden=d_hidden_msa, p_drop=p_drop) - self.msa2pair = MSA2Pair(d_msa=d_msa, d_pair=d_pair, - d_hidden=d_hidden//2, p_drop=p_drop) - #d_hidden=d_hidden, p_drop=p_drop) - self.pair2pair = PairStr2Pair(d_pair=d_pair, n_head=n_head_pair, - d_hidden=d_hidden, p_drop=p_drop) - self.str2str = Str2Str(d_msa=d_msa, d_pair=d_pair, - d_state=SE3_param['l0_out_features'], - SE3_param=SE3_param, - p_drop=p_drop) - - def forward(self, msa, pair, R_in, T_in, xyz, state, idx, use_checkpoint=False): - rbf_feat = rbf(torch.cdist(xyz[:,:,1,:], xyz[:,:,1,:])) - if use_checkpoint: - msa = checkpoint.checkpoint(create_custom_forward(self.msa2msa), msa, pair, rbf_feat, state) - pair = checkpoint.checkpoint(create_custom_forward(self.msa2pair), msa, pair) - pair = checkpoint.checkpoint(create_custom_forward(self.pair2pair), pair, rbf_feat) - R, T, state, alpha = checkpoint.checkpoint(create_custom_forward(self.str2str, top_k=0), msa, pair, R_in, T_in, xyz, state, idx) - else: - msa = self.msa2msa(msa, pair, rbf_feat, state) - pair = self.msa2pair(msa, pair) - pair = self.pair2pair(pair, rbf_feat) - R, T, state, alpha = self.str2str(msa, pair, R_in, T_in, xyz, state, idx, top_k=0) - - return msa, pair, R, T, state, alpha - -class IterativeSimulator(nn.Module): - def __init__(self, n_extra_block=4, n_main_block=12, n_ref_block=4, - d_msa=256, d_msa_full=64, d_pair=128, d_hidden=32, - n_head_msa=8, n_head_pair=4, - SE3_param_full={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32}, - SE3_param_topk={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32}, - p_drop=0.15): - super(IterativeSimulator, self).__init__() - self.n_extra_block = n_extra_block - self.n_main_block = n_main_block - self.n_ref_block = n_ref_block - - self.proj_state = nn.Linear(SE3_param_topk['l0_out_features'], SE3_param_full['l0_out_features']) - # Update with extra sequences - if n_extra_block > 0: - self.extra_block = nn.ModuleList([IterBlock(d_msa=d_msa_full, d_pair=d_pair, - n_head_msa=n_head_msa, - n_head_pair=n_head_pair, - d_hidden_msa=8, - d_hidden=d_hidden, - p_drop=p_drop, - use_global_attn=True, - SE3_param=SE3_param_full) - for i in range(n_extra_block)]) 
- - # Update with seed sequences - if n_main_block > 0: - self.main_block = nn.ModuleList([IterBlock(d_msa=d_msa, d_pair=d_pair, - n_head_msa=n_head_msa, - n_head_pair=n_head_pair, - d_hidden=d_hidden, - p_drop=p_drop, - use_global_attn=False, - SE3_param=SE3_param_full) - for i in range(n_main_block)]) - - self.proj_state2 = nn.Linear(SE3_param_full['l0_out_features'], SE3_param_topk['l0_out_features']) - # Final SE(3) refinement - if n_ref_block > 0: - self.str_refiner = Str2Str(d_msa=d_msa, d_pair=d_pair, - d_state=SE3_param_topk['l0_out_features'], - SE3_param=SE3_param_topk, - p_drop=p_drop) - - self.reset_parameter() - def reset_parameter(self): - self.proj_state = init_lecun_normal(self.proj_state) - nn.init.zeros_(self.proj_state.bias) - self.proj_state2 = init_lecun_normal(self.proj_state2) - nn.init.zeros_(self.proj_state2.bias) - - def forward(self, seq, msa, msa_full, pair, xyz_in, state, idx, use_checkpoint=False): - # input: - # seq: query sequence (B, L) - # msa: seed MSA embeddings (B, N, L, d_msa) - # msa_full: extra MSA embeddings (B, N, L, d_msa_full) - # pair: initial residue pair embeddings (B, L, L, d_pair) - # xyz_in: initial BB coordinates (B, L, n_atom, 3) - # state: initial state features containing mixture of query seq, sidechain, accuracy info (B, L, d_state) - # idx: residue index - - B, L = pair.shape[:2] - - R_in = torch.eye(3, device=xyz_in.device).reshape(1,1,3,3).expand(B, L, -1, -1) - T_in = xyz_in[:,:,1].clone() - xyz_in = xyz_in - T_in.unsqueeze(-2) - - state = self.proj_state(state) - - R_s = list() - T_s = list() - alpha_s = list() - for i_m in range(self.n_extra_block): - R_in = R_in.detach() # detach rotation (for stability) - T_in = T_in.detach() - # Get current BB structure - xyz = einsum('bnij,bnaj->bnai', R_in, xyz_in) + T_in.unsqueeze(-2) - - msa_full, pair, R_in, T_in, state, alpha = self.extra_block[i_m](msa_full, pair, - R_in, T_in, xyz, state, idx, - use_checkpoint=use_checkpoint) - R_s.append(R_in) - T_s.append(T_in) - alpha_s.append(alpha) - - for i_m in range(self.n_main_block): - R_in = R_in.detach() - T_in = T_in.detach() - # Get current BB structure - xyz = einsum('bnij,bnaj->bnai', R_in, xyz_in) + T_in.unsqueeze(-2) - - msa, pair, R_in, T_in, state, alpha = self.main_block[i_m](msa, pair, - R_in, T_in, xyz, state, idx, - use_checkpoint=use_checkpoint) - R_s.append(R_in) - T_s.append(T_in) - alpha_s.append(alpha) - - state = self.proj_state2(state) - for i_m in range(self.n_ref_block): - R_in = R_in.detach() - T_in = T_in.detach() - xyz = einsum('bnij,bnaj->bnai', R_in, xyz_in) + T_in.unsqueeze(-2) - R_in, T_in, state, alpha = self.str_refiner(msa, pair, R_in, T_in, xyz, state, idx, top_k=64) - R_s.append(R_in) - T_s.append(T_in) - alpha_s.append(alpha) - - R_s = torch.stack(R_s, dim=0) - T_s = torch.stack(T_s, dim=0) - alpha_s = torch.stack(alpha_s, dim=0) - - return msa, pair, R_s, T_s, alpha_s, state diff --git a/spaces/merve/dataset-worldviews/public/anonymization/annotations.js b/spaces/merve/dataset-worldviews/public/anonymization/annotations.js deleted file mode 100644 index ed45db46369d1bb2a709b20bd97c29451d4284c0..0000000000000000000000000000000000000000 --- a/spaces/merve/dataset-worldviews/public/anonymization/annotations.js +++ /dev/null @@ -1,38 +0,0 @@ -var annotations = - -[ -] - - - - -function addSwoop(c){ - var swoopy = d3.swoopyDrag() - .x(d => c.x(d.x)) - .y(d => c.y(d.y)) - .draggable(0) - .annotations(annotations) - - var swoopySel = c.svg.append('g.annotations').call(swoopy) - - c.svg.append('marker#arrow') - 
.attr('viewBox', '-10 -10 20 20') - .attr('markerWidth', 20) - .attr('markerHeight', 20) - .attr('orient', 'auto') - .append('path').at({d: 'M-6.75,-6.75 L 0,0 L -6.75,6.75'}) - - - swoopySel.selectAll('path').attr('marker-end', 'url(#arrow)') - window.annotationSel = swoopySel.selectAll('g') - .st({fontSize: 12, opacity: d => d.slide == 0 ? 1 : 0}) - - swoopySel.selectAll('text') - .each(function(d){ - d3.select(this) - .text('') //clear existing text - .tspans(d3.wordwrap(d.text, d.width || 20), 12) //wrap after 20 char - }) -} - - diff --git a/spaces/monra/freegpt-webui-chimera/README.md b/spaces/monra/freegpt-webui-chimera/README.md deleted file mode 100644 index baa9598584fecfcc24deae8143f363a0e2ded3cf..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui-chimera/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Freegpt Webui Chimera -emoji: ⚡ -colorFrom: indigo -colorTo: gray -sdk: docker -pinned: false -app_port: 1338 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/monra/freegpt-webui/client/css/message.css b/spaces/monra/freegpt-webui/client/css/message.css deleted file mode 100644 index 64e04147ee4d1e76dda4f39c4f756c9da63e3874..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui/client/css/message.css +++ /dev/null @@ -1,65 +0,0 @@ -.message { - width: 100%; - overflow-wrap: break-word; - display: flex; - gap: var(--section-gap); - padding: var(--section-gap); - padding-bottom: 0; -} - -.message:last-child { - animation: 0.6s show_message; -} - -@keyframes show_message { - from { - transform: translateY(10px); - opacity: 0; - } -} - -.message .avatar-container img { - max-width: 48px; - max-height: 48px; - box-shadow: 0.4px 0.5px 0.7px -2px rgba(0, 0, 0, 0.08), 1.1px 1.3px 2px -2px rgba(0, 0, 0, 0.041), - 2.7px 3px 4.8px -2px rgba(0, 0, 0, 0.029), 9px 10px 16px -2px rgba(0, 0, 0, 0.022); -} - -.message .content { - display: flex; - flex-direction: column; - width: 90%; - gap: 18px; -} - -.message .content p, -.message .content li, -.message .content code { - font-size: 1rem; - line-height: 1.3; -} - -@media screen and (max-height: 720px) { - .message { - padding: 12px; - gap: 0; - } - - .message .content { - margin-left: 8px; - width: 80%; - } - - .message .avatar-container img { - max-width: 32px; - max-height: 32px; - } - - .message .content, - .message .content p, - .message .content li, - .message .content code { - font-size: 0.875rem; - line-height: 1.3; - } -} diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py deleted file mode 100644 index 6bbd0ae821b42ab693f4141e7c161d6d7cb0b15a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py +++ /dev/null @@ -1,40 +0,0 @@ -import numpy as np -from fairseq.data.audio.feature_transforms import ( - AudioFeatureTransform, - register_audio_feature_transform, -) - - -@register_audio_feature_transform("utterance_cmvn") -class UtteranceCMVN(AudioFeatureTransform): - """Utterance-level CMVN (cepstral mean and variance normalization)""" - - @classmethod - def from_config_dict(cls, config=None): - _config = {} if config is None else config - return UtteranceCMVN( - _config.get("norm_means", True), - _config.get("norm_vars", True), - ) - - def __init__(self, norm_means=True, norm_vars=True): - self.norm_means, 
self.norm_vars = norm_means, norm_vars - - def __repr__(self): - return ( - self.__class__.__name__ - + f"(norm_means={self.norm_means}, norm_vars={self.norm_vars})" - ) - - def __call__(self, x): - mean = x.mean(axis=0) - square_sums = (x ** 2).sum(axis=0) - - if self.norm_means: - x = np.subtract(x, mean) - if self.norm_vars: - var = square_sums / x.shape[0] - mean ** 2 - std = np.sqrt(np.maximum(var, 1e-10)) - x = np.divide(x, std) - - return x diff --git a/spaces/mueller-franzes/medfusion-app/scripts/train_diffusion.py b/spaces/mueller-franzes/medfusion-app/scripts/train_diffusion.py deleted file mode 100644 index 6416169bf9c1efe9883ea585738b5b7206452116..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/scripts/train_diffusion.py +++ /dev/null @@ -1,183 +0,0 @@ - -from email.mime import audio -from pathlib import Path -from datetime import datetime - -import torch -import torch.nn as nn -from pytorch_lightning.trainer import Trainer -from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint -import numpy as np -import torchio as tio - -from medical_diffusion.data.datamodules import SimpleDataModule -from medical_diffusion.data.datasets import AIROGSDataset, MSIvsMSS_2_Dataset, CheXpert_2_Dataset -from medical_diffusion.models.pipelines import DiffusionPipeline -from medical_diffusion.models.estimators import UNet -from medical_diffusion.external.stable_diffusion.unet_openai import UNetModel -from medical_diffusion.models.noise_schedulers import GaussianNoiseScheduler -from medical_diffusion.models.embedders import LabelEmbedder, TimeEmbbeding -from medical_diffusion.models.embedders.latent_embedders import VAE, VAEGAN, VQVAE, VQGAN - -import torch.multiprocessing -torch.multiprocessing.set_sharing_strategy('file_system') - - - -if __name__ == "__main__": - # ------------ Load Data ---------------- - # ds = AIROGSDataset( - # crawler_ext='jpg', - # augment_horizontal_flip = False, - # augment_vertical_flip = False, - # # path_root='/home/gustav/Documents/datasets/AIROGS/data_256x256/', - # path_root='/mnt/hdd/datasets/eye/AIROGS/data_256x256', - # ) - - # ds = MSIvsMSS_2_Dataset( - # crawler_ext='jpg', - # image_resize=None, - # image_crop=None, - # augment_horizontal_flip=False, - # augment_vertical_flip=False, - # # path_root='/home/gustav/Documents/datasets/Kather_2/train', - # path_root='/mnt/hdd/datasets/pathology/kather_msi_mss_2/train/', - # ) - - ds = CheXpert_2_Dataset( # 256x256 - augment_horizontal_flip=False, - augment_vertical_flip=False, - path_root = '/mnt/hdd/datasets/chest/CheXpert/ChecXpert-v10/preprocessed_tianyu' - ) - - dm = SimpleDataModule( - ds_train = ds, - batch_size=32, - # num_workers=0, - pin_memory=True, - # weights=ds.get_weights() - ) - - current_time = datetime.now().strftime("%Y_%m_%d_%H%M%S") - path_run_dir = Path.cwd() / 'runs' / str(current_time) - path_run_dir.mkdir(parents=True, exist_ok=True) - accelerator = 'gpu' if torch.cuda.is_available() else 'cpu' - - - - # ------------ Initialize Model ------------ - # cond_embedder = None - cond_embedder = LabelEmbedder - cond_embedder_kwargs = { - 'emb_dim': 1024, - 'num_classes': 2 - } - - - time_embedder = TimeEmbbeding - time_embedder_kwargs ={ - 'emb_dim': 1024 # stable diffusion uses 4*model_channels (model_channels is about 256) - } - - - noise_estimator = UNet - noise_estimator_kwargs = { - 'in_ch':8, - 'out_ch':8, - 'spatial_dims':2, - 'hid_chs': [ 256, 256, 512, 1024], - 'kernel_sizes':[3, 3, 3, 3], - 'strides': [1, 2, 2, 2], - 
'time_embedder':time_embedder, - 'time_embedder_kwargs': time_embedder_kwargs, - 'cond_embedder':cond_embedder, - 'cond_embedder_kwargs': cond_embedder_kwargs, - 'deep_supervision': False, - 'use_res_block':True, - 'use_attention':'none', - } - - - # ------------ Initialize Noise ------------ - noise_scheduler = GaussianNoiseScheduler - noise_scheduler_kwargs = { - 'timesteps': 1000, - 'beta_start': 0.002, # 0.0001, 0.0015 - 'beta_end': 0.02, # 0.01, 0.0195 - 'schedule_strategy': 'scaled_linear' - } - - # ------------ Initialize Latent Space ------------ - # latent_embedder = None - # latent_embedder = VQVAE - latent_embedder = VAE - latent_embedder_checkpoint = 'runs/2022_12_12_133315_chest_vaegan/last_vae.ckpt' - - # ------------ Initialize Pipeline ------------ - pipeline = DiffusionPipeline( - noise_estimator=noise_estimator, - noise_estimator_kwargs=noise_estimator_kwargs, - noise_scheduler=noise_scheduler, - noise_scheduler_kwargs = noise_scheduler_kwargs, - latent_embedder=latent_embedder, - latent_embedder_checkpoint = latent_embedder_checkpoint, - estimator_objective='x_T', - estimate_variance=False, - use_self_conditioning=False, - use_ema=False, - classifier_free_guidance_dropout=0.5, # Disable during training by setting to 0 - do_input_centering=False, - clip_x0=False, - sample_every_n_steps=1000 - ) - - # pipeline_old = pipeline.load_from_checkpoint('runs/2022_11_27_085654_chest_diffusion/last.ckpt') - # pipeline.noise_estimator.load_state_dict(pipeline_old.noise_estimator.state_dict(), strict=True) - - # -------------- Training Initialization --------------- - to_monitor = "train/loss" # "pl/val_loss" - min_max = "min" - save_and_sample_every = 100 - - early_stopping = EarlyStopping( - monitor=to_monitor, - min_delta=0.0, # minimum change in the monitored quantity to qualify as an improvement - patience=30, # number of checks with no improvement - mode=min_max - ) - checkpointing = ModelCheckpoint( - dirpath=str(path_run_dir), # dirpath - monitor=to_monitor, - every_n_train_steps=save_and_sample_every, - save_last=True, - save_top_k=2, - mode=min_max, - ) - trainer = Trainer( - accelerator=accelerator, - # devices=[0], - # precision=16, - # amp_backend='apex', - # amp_level='O2', - # gradient_clip_val=0.5, - default_root_dir=str(path_run_dir), - callbacks=[checkpointing], - # callbacks=[checkpointing, early_stopping], - enable_checkpointing=True, - check_val_every_n_epoch=1, - log_every_n_steps=save_and_sample_every, - auto_lr_find=False, - # limit_train_batches=1000, - limit_val_batches=0, # 0 = disable validation - Note: Early Stopping no longer available - min_epochs=100, - max_epochs=1001, - num_sanity_val_steps=2, - ) - - # ---------------- Execute Training ---------------- - trainer.fit(pipeline, datamodule=dm) - - # ------------- Save path to best model ------------- - pipeline.save_best_checkpoint(trainer.logger.log_dir, checkpointing.best_model_path) - - diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/synthesizer_preprocess_audio.py b/spaces/mygyasir/Real-Time-Voice-Cloning/synthesizer_preprocess_audio.py deleted file mode 100644 index fd4d01d476d77391322aef9d9d5a005adb1f5c15..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/synthesizer_preprocess_audio.py +++ /dev/null @@ -1,59 +0,0 @@ -from synthesizer.preprocess import preprocess_dataset -from synthesizer.hparams import hparams -from utils.argutils import print_args -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = 
argparse.ArgumentParser( - description="Preprocesses audio files from datasets, encodes them as mel spectrograms " - "and writes them to the disk. Audio files are also saved, to be used by the " - "vocoder for training.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("datasets_root", type=Path, help=\ - "Path to the directory containing your LibriSpeech/TTS datasets.") - parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ - "Path to the output directory that will contain the mel spectrograms, the audios and the " - "embeds. Defaults to /SV2TTS/synthesizer/") - parser.add_argument("-n", "--n_processes", type=int, default=None, help=\ - "Number of processes in parallel.") - parser.add_argument("-s", "--skip_existing", action="store_true", help=\ - "Whether to overwrite existing files with the same name. Useful if the preprocessing was " - "interrupted.") - parser.add_argument("--hparams", type=str, default="", help=\ - "Hyperparameter overrides as a comma-separated list of name-value pairs") - parser.add_argument("--no_trim", action="store_true", help=\ - "Preprocess audio without trimming silences (not recommended).") - parser.add_argument("--no_alignments", action="store_true", help=\ - "Use this option when dataset does not include alignments\ - (these are used to split long audio files into sub-utterances.)") - parser.add_argument("--datasets_name", type=str, default="LibriSpeech", help=\ - "Name of the dataset directory to process.") - parser.add_argument("--subfolders", type=str, default="train-clean-100, train-clean-360", help=\ - "Comma-separated list of subfolders to process inside your dataset directory") - args = parser.parse_args() - - # Process the arguments - if not hasattr(args, "out_dir"): - args.out_dir = args.datasets_root.joinpath("SV2TTS", "synthesizer") - - # Create directories - assert args.datasets_root.exists() - args.out_dir.mkdir(exist_ok=True, parents=True) - - # Verify webrtcvad is available - if not args.no_trim: - try: - import webrtcvad - except: - raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " - "noise removal and is recommended. Please install and try again. If installation fails, " - "use --no_trim to disable this error message.") - del args.no_trim - - # Preprocess the dataset - print_args(args, parser) - args.hparams = hparams.parse(args.hparams) - preprocess_dataset(**vars(args)) diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/docs/Makefile b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/docs/Makefile deleted file mode 100644 index d4bb2cbb9eddb1bb1b4f366623044af8e4830919..0000000000000000000000000000000000000000 --- a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/spaces/nathanluskey/twitter_sentiment/README.md b/spaces/nathanluskey/twitter_sentiment/README.md deleted file mode 100644 index 1a71eec4b7f614bbf3c287c109a76738d86dd242..0000000000000000000000000000000000000000 --- a/spaces/nathanluskey/twitter_sentiment/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Twitter Sentiment -emoji: 🐦 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/naver/SuperFeatures/how/layers/attention.py b/spaces/naver/SuperFeatures/how/layers/attention.py deleted file mode 100644 index 547fc9be53b2f18a4b48cb984b9f4997a76c8aae..0000000000000000000000000000000000000000 --- a/spaces/naver/SuperFeatures/how/layers/attention.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Layers producing a 2D attention map from a feature map""" - -from torch import nn - - -class L2Attention(nn.Module): - """Compute the attention as L2-norm of local descriptors""" - - def forward(self, x): - return (x.pow(2.0).sum(1) + 1e-10).sqrt().squeeze(0) diff --git a/spaces/naxida/anime-remove-background/README.md b/spaces/naxida/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/naxida/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Its Quiz Time Free Download __EXCLUSIVE__ [cheat].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Its Quiz Time Free Download __EXCLUSIVE__ [cheat].md deleted file mode 100644 index a6d978a2bd94c9af8696db73fe5d9793a4f77ce0..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Its Quiz Time Free Download __EXCLUSIVE__ [cheat].md +++ /dev/null @@ -1,37 +0,0 @@ -

    How to Download and Cheat in It's Quiz Time - The Ultimate Trivia Game


    If you are a fan of trivia games, you might have heard of It's Quiz Time, the biggest quiz game on console and PC with over 30,000 questions and 11 exciting game modes. You can play solo or with up to 8 friends using your smartphones as controllers, or even stream your game live to Twitch, YouTube, or Mixer and compete with up to 10,000 viewers. But what if you want to download the game for free and cheat your way to the top? Here are some tips and tricks to help you out.


    How to Download It's Quiz Time for Free


    It's Quiz Time is available on Steam for $19.99, but you can also find some cracked versions online that let you play the game without paying. One of them is Its.Quiz.Time.v1.08-0xdeadc0de, which was released by P2P group 0xdeadc0de in April 2023. To download and install this version, follow these steps:


1. Download the zip file from one of the links provided on the website, such as Mega, Uptobox, 1fichier, Pixeldrain, or Mediafire.
2. Extract the zip file using a program like WinRAR or 7-Zip.
3. Launch the game by running ItsQuizTime.exe as administrator.
4. Enjoy!

    Note: This version may not work with some antivirus programs, so you may need to disable them before launching the game. Also, this version may not be compatible with the latest updates and features of the official game, so play at your own risk.


    How to Cheat in It's Quiz Time


    If you want to cheat in It's Quiz Time, you have a few options. One of them is to use a program like Cheat Engine, which allows you to modify the game's memory and values. For example, you can use Cheat Engine to freeze your score, change your answer time, or increase your chances of getting a correct answer. To use Cheat Engine with It's Quiz Time, follow these steps:

1. Download and install Cheat Engine from https://www.cheatengine.org/.
2. Launch It's Quiz Time and start a game mode of your choice.
3. Launch Cheat Engine and click on the computer icon on the top left corner.
4. Select ItsQuizTime.exe from the process list and click Open.
5. On the right panel, click on Add Address Manually.
6. Type in one of the following addresses depending on what you want to cheat:
   - Score: 004F6C40
   - Answer time: 004F6C44
   - Correct answer chance: 004F6C48
7. Click OK and check the Active box.
8. Double-click on the Value column and change it to whatever you want.
9. Go back to the game and see the results.

    Note: This method may not work with some game modes or questions, and it may cause glitches or crashes. Also, cheating may ruin the fun and challenge of the game, so use it sparingly and responsibly.


    Conclusion


    It's Quiz Time is a fun and engaging trivia game that you can play alone or with friends online or offline. However, if you want to download it for free or cheat in it, you can use some of the methods described above. Just remember that these methods are not official or supported by the developers, and they may have some risks or drawbacks. So use them at your own discretion and enjoy the game!

    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Jai Ho Video Songs Hd 1080p Telugu Bluray Movies Download EXCLUSIVE.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Jai Ho Video Songs Hd 1080p Telugu Bluray Movies Download EXCLUSIVE.md deleted file mode 100644 index 0d55e8103e9de40929fbf735dcff4887524663c1..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Jai Ho Video Songs Hd 1080p Telugu Bluray Movies Download EXCLUSIVE.md +++ /dev/null @@ -1,26 +0,0 @@ -

    How to Download Jai Ho Video Songs in HD 1080p Telugu Bluray Quality


    Jai Ho is a 2014 Bollywood action drama film starring Salman Khan and Daisy Shah. The film features some catchy and energetic video songs that are popular among the fans. If you are looking for a way to download Jai Ho video songs in HD 1080p Telugu bluray quality, you have come to the right place. In this article, we will show you how to download Jai Ho video songs in HD 1080p Telugu bluray quality using a simple and reliable method.



    Why Download Jai Ho Video Songs in HD 1080p Telugu Bluray Quality?


    Jai Ho video songs are composed by Sajid-Wajid and Devi Sri Prasad, and sung by various artists like Wajid, Shreya Ghoshal, Shaan, Ujjayinee Roy, Armaan Malik, and more. The video songs are choreographed by Remo D'Souza, Ganesh Acharya, and Mudassar Khan. The video songs showcase the vibrant and colorful culture of India, as well as the action-packed scenes from the film. The video songs are also available in different languages like Hindi, Tamil, and Telugu.


    If you want to enjoy the video songs in the best possible quality, you should download them in HD 1080p Telugu bluray quality. HD 1080p means that the video resolution is 1920 x 1080 pixels, which is the standard for high-definition videos. Telugu bluray means that the audio quality is in Dolby Digital or DTS format, which is the standard for high-quality sound. By downloading Jai Ho video songs in HD 1080p Telugu bluray quality, you can experience the crisp and clear visuals and the rich and immersive sound of the video songs.


    How to Download Jai Ho Video Songs in HD 1080p Telugu Bluray Quality?


    There are many websites that claim to offer Jai Ho video songs in HD 1080p Telugu bluray quality, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Some of them may also have broken links, low-quality videos, or annoying ads that can ruin your experience.


That's why we recommend using a reliable and reputable website that offers Jai Ho video songs in HD 1080p Telugu bluray quality without any hassle or risk. One such website is www.jaiho.com, a site dedicated to Jai Ho fans. Here are the steps to download the video songs from www.jaiho.com:

1. Go to www.jaiho.com on your browser.
2. On the homepage, you will see a list of Jai Ho video songs in different languages. Click on the Telugu option.
3. You will be redirected to a page where you can see all the Jai Ho video songs in Telugu language. Choose the video song that you want to download.
4. You will be taken to another page where you can see the details of the video song, such as the title, duration, singers, composers, etc. You will also see a download button below the video player.
5. Click on the download button and choose the HD 1080p Telugu bluray option from the drop-down menu.
6. A new window will open where you can see the download link for the video song. Right-click on the link and choose "Save link as" or "Save target as" from the menu.
7. Select a location on your device where you want to save the video song file and click on "Save".
8. The download will start automatically and may take a few minutes depending on your internet speed and file size.
9. Once the download is complete, you can open the file and enjoy watching Jai Ho video songs in HD 1080p Telugu bluray quality.

    Conclusion


    Jai Ho is a film that celebrates the

    \ No newline at end of file diff --git a/spaces/nijatzeynalov/AzVoiceSent/app.py b/spaces/nijatzeynalov/AzVoiceSent/app.py deleted file mode 100644 index 2fe5bd6d7c75e023cf2309e055ef2500fbbef65a..0000000000000000000000000000000000000000 --- a/spaces/nijatzeynalov/AzVoiceSent/app.py +++ /dev/null @@ -1,55 +0,0 @@ -import whisper -import gradio as gr -from transformers import pipeline - - -model = whisper.load_model("base") -sentiment_analysis = pipeline("sentiment-analysis",model="siebert/sentiment-roberta-large-english") - -def process_audio_file(file): - with open(file, "rb") as f: - inputs = f.read() - - audio = ffmpeg_read(inputs, sampling_rate) - return audio - - -def transcribe(Microphone, File_Upload): - warn_output = "" - if (Microphone is not None) and (File_Upload is not None): - warn_output = "WARNING: You've uploaded an audio file and used the microphone. " \ - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - file = Microphone - - elif (Microphone is None) and (File_Upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - elif Microphone is not None: - file = Microphone - else: - file = File_Upload - - result = model.transcribe(file, task="translate") - text = sentiment_analysis(result['text']) - - label = text[0]['label'] - score = text[0]['score'] - return label, score - -iface = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type='filepath', optional=True), - gr.inputs.Audio(source="upload", type='filepath', optional=True), - ], - outputs=[ - gr.outputs.Textbox(label="Sentiment"), - gr.outputs.Textbox(label="Score") - ], - layout="horizontal", - theme="huggingface", - title="AzVoiceSent: Sentiment Classification from Voice Transcriptions in Azerbaijani", - description="AzVoiceSent is research project focused on sentiment classification from voice transcriptions in Azerbaijani. The project has the potential to provide valuable insights into the sentiment expressed by speakers in various domains and applications. ", - allow_flagging='never', -) -iface.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/structures/__init__.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/structures/__init__.py deleted file mode 100644 index ed32c5e9d6c4c1599ba960681d9e86889e2cdbd8..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/structures/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -from .chart import DensePoseChartPredictorOutput -from .chart_confidence import decorate_predictor_output_class_with_confidences -from .cse_confidence import decorate_cse_predictor_output_class_with_confidences -from .chart_result import ( - DensePoseChartResult, - DensePoseChartResultWithConfidences, - quantize_densepose_chart_result, - compress_quantized_densepose_chart_result, - decompress_compressed_densepose_chart_result, -) -from .cse import DensePoseEmbeddingPredictorOutput -from .data_relative import DensePoseDataRelative -from .list import DensePoseList -from .mesh import Mesh, create_mesh -from .transform_data import DensePoseTransformData, normalized_coords_transform diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_in21k_3x.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_in21k_3x.py deleted file mode 100644 index 7c3bdce0a2206b3afd1a33245a193292f0cd2a35..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_in21k_3x.py +++ /dev/null @@ -1,3 +0,0 @@ -from .cascade_mask_rcnn_mvitv2_b_3x import model, dataloader, optimizer, lr_multiplier, train - -train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_B_in21k.pyth" diff --git a/spaces/nyanko7/niji-playground/app.py b/spaces/nyanko7/niji-playground/app.py deleted file mode 100644 index 6d60136d09862be03e9fee708750abee3cd23498..0000000000000000000000000000000000000000 --- a/spaces/nyanko7/niji-playground/app.py +++ /dev/null @@ -1,150 +0,0 @@ -import gradio as gr -import numpy as np -import time -import requests -import json -import os -import random -import tempfile -import logging -import threading -import asyncio -from PIL import Image -from io import BytesIO -from requests.adapters import HTTPAdapter -from urllib3.util import Retry - -odnapi = os.getenv("odnapi_url") -fetapi = os.getenv("fetapi_url") -auth_token = os.getenv("auth_token") - -# Setup a logger -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -def split_image(img): - width, height = img.size - width_cut = width // 2 - height_cut = height // 2 - return [ - img.crop((0, 0, width_cut, height_cut)), - img.crop((width_cut, 0, width, height_cut)), - img.crop((0, height_cut, width_cut, height)), - img.crop((width_cut, height_cut, width, height)) - ] - -def save_image(img, suffix='.png'): - with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp: - img.save(tmp, 'PNG') - return tmp.name - -async def niji_api(prompt, progress=gr.Progress(), max_retries=5, backoff_factor=0.1): - iters = 1 - progress(iters/32, desc="Sending request to MidJourney Server") - session = requests.Session() # Using Session to reuse the underlying TCP connection - retries = Retry(total=max_retries, backoff_factor=backoff_factor, status_forcelist=[429, 500, 502, 503, 504]) - adapter = HTTPAdapter(max_retries=retries) - session.mount("http://", adapter) - session.mount("https://", adapter) - try: - response = session.post( - fetapi, - headers={'Content-Type': 'application/json'}, - data=json.dumps({'msg': prompt}), - timeout=5.0 # Here, the timeout duration is set to 5 seconds - ) - response.raise_for_status() # Check for HTTP errors. 
- except requests.exceptions.RequestException as e: - logger.error(f"Failed to make POST request") - raise ValueError("Invalid Response") - data = response.json() - message_id = data['messageId'] - prog = 0 - iters += 5 - progress(iters/48, desc="Waiting in the generate queue") - - def fetch_image(url): - try: - response = session.get(url, timeout=5.0) - return Image.open(BytesIO(response.content)) - except requests.exceptions.RequestException as e: - logger.error(f"Failed to fetch image") - return None - - def download_and_split_image(url): - try: - img = fetch_image(url) - images = split_image(img) - return [save_image(i) for i in images] - except Exception: - pass - - while prog < 100: - try: - response = session.get( - f'{odnapi}/message/{message_id}?expireMins=2', - headers={'Authorization': auth_token}, - timeout=5.0 - ) - response.raise_for_status() - except requests.exceptions.RequestException as e: - logger.warning(f"Failure in getting message response") - continue - data = response.json() - prog = data.get('progress', 0) - if progress_image_url := data.get('progressImageUrl'): - iters = -100 - yield [(img, f"{prog}% done") for img in download_and_split_image(progress_image_url)] - - wait_time = random.uniform(1, 2) - await asyncio.sleep(wait_time) - - r = iters/48 - if r < 0.4: - desc = "Waiting in the generate queue" - elif r < 0.6: - desc = "Still queueing" - elif r < 0.8: - desc = "Almost done" - if iters > 0: - progress(r, desc=desc) - iters += random.uniform(1, 2) - - # Process the final image urls - image_url = data['response']['imageUrl'] - yield [(img, f"image {idx+1}/4") for idx, img in enumerate(download_and_split_image(image_url))] - -with gr.Blocks() as demo: - gr.HTML(''' -
    -
    -

    - MidJourney / NijiJourney Playground 🎨 -

    -
    -
    -

    - Demo for the MidJourney, draw with heart and love. -

    -
    - ''') - with gr.Column(variant="panel"): - with gr.Row(): - text = gr.Textbox( - label="Enter your prompt", - value="1girl,long hair,looking at viewer,kawaii,serafuku --s 250 --niji 5", - max_lines=3, - container=False, - ) - btn = gr.Button("Generate image", scale=0) - gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery", height="4096") - btn.click(niji_api, text, gallery) - -demo.queue(concurrency_count=2) -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/metrics/ssim.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/metrics/ssim.py deleted file mode 100644 index 2a3a431813678cf5d02f0d0b8185712be16f9e24..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/metrics/ssim.py +++ /dev/null @@ -1,46 +0,0 @@ -import cv2 -import numpy as np - - -def calculate_ssim(img1, img2): - C1 = (0.01 * 255)**2 - C2 = (0.03 * 255)**2 - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - kernel = cv2.getGaussianKernel(11, 1.5) - window = np.outer(kernel, kernel.transpose()) - - mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid - mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] - mu1_sq = mu1**2 - mu2_sq = mu2**2 - mu1_mu2 = mu1 * mu2 - sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq - sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq - sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * - (sigma1_sq + sigma2_sq + C2)) - return ssim_map.mean() - - -def ssim(img1, img2): - '''calculate SSIM - the same outputs as MATLAB's - img1, img2: [0, 255] - ''' - if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') - if img1.ndim == 2: - return calculate_ssim(img1, img2) - elif img1.ndim == 3: - if img1.shape[2] == 3: - ssims = [] - for i in range(3): - ssims.append(calculate_ssim(img1[:, :, i], img2[:, :, i])) - return np.array(ssims).mean() - elif img1.shape[2] == 1: - return calculate_ssim(np.squeeze(img1), np.squeeze(img2)) - else: - raise ValueError('Wrong input image dimensions.') \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/SiamMask/tools/train_siamrpn.py b/spaces/oguzakif/video-object-remover/SiamMask/tools/train_siamrpn.py deleted file mode 100644 index fb666c3978850911f4fb1e5c4a1bd6f4930a04be..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/tools/train_siamrpn.py +++ /dev/null @@ -1,292 +0,0 @@ -# -------------------------------------------------------- -# SiamMask -# Licensed under The MIT License -# Written by Qiang Wang (wangqiang2015 at ia.ac.cn) -# -------------------------------------------------------- -import argparse -import logging -import os -import shutil -import time -import torch -from torch.utils.data import DataLoader - -from utils.log_helper import init_log, print_speed, add_file_handler, Dummy -from utils.load_helper import load_pretrain, restore_from -from utils.average_meter_helper import AverageMeter - -from datasets.siam_rpn_dataset import DataSets -import models as models -import math - -from utils.lr_helper import build_lr_scheduler -from tensorboardX import SummaryWriter - -from utils.config_helper import load_config -import json -import cv2 -from torch.utils.collect_env import get_pretty_env_info - -torch.backends.cudnn.benchmark = True - -model_zoo = 
sorted(name for name in models.__dict__ - if not name.startswith("__") - and callable(models.__dict__[name])) - -parser = argparse.ArgumentParser(description='PyTorch Tracking Training') - -parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', - help='number of data loading workers (default: 16)') -parser.add_argument('--epochs', default=50, type=int, metavar='N', - help='number of total epochs to run') -parser.add_argument('--start-epoch', default=0, type=int, metavar='N', - help='manual epoch number (useful on restarts)') -parser.add_argument('-b', '--batch', default=64, type=int, - metavar='N', help='mini-batch size (default: 64)') -parser.add_argument('--lr', '--learning-rate', default=0.001, type=float, - metavar='LR', help='initial learning rate') -parser.add_argument('--momentum', default=0.9, type=float, metavar='M', - help='momentum') -parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, - metavar='W', help='weight decay (default: 1e-4)') -parser.add_argument('--clip', default=10.0, type=float, - help='gradient clip value') -parser.add_argument('--print-freq', '-p', default=10, type=int, - metavar='N', help='print frequency (default: 10)') -parser.add_argument('--resume', default='', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') -parser.add_argument('--pretrained', dest='pretrained', default='', - help='use pre-trained model') -parser.add_argument('--config', dest='config', required=True, - help='hyperparameter of SiamRPN in json format') -parser.add_argument('--arch', dest='arch', default='', choices=model_zoo + ['Custom',''], - help='architecture of pretrained model') -parser.add_argument('-l', '--log', default="log.txt", type=str, - help='log file') -parser.add_argument('-s', '--save_dir', default='snapshot', type=str, - help='save dir') -parser.add_argument('--log-dir', default='board', help='TensorBoard log dir') - -best_acc = 0. 
- - -def collect_env_info(): - env_str = get_pretty_env_info() - env_str += "\n OpenCV ({})".format(cv2.__version__) - return env_str - - -def build_data_loader(cfg): - logger = logging.getLogger('global') - - logger.info("build train dataset") # train_dataset - train_set = DataSets(cfg['train_datasets'], cfg['anchors'], args.epochs) - train_set.shuffle() - - logger.info("build val dataset") # val_dataset - if not 'val_datasets' in cfg.keys(): - cfg['val_datasets'] = cfg['train_datasets'] - val_set = DataSets(cfg['val_datasets'], cfg['anchors']) - val_set.shuffle() - - train_loader = DataLoader(train_set, batch_size=args.batch, num_workers=args.workers, - pin_memory=True, sampler=None) - val_loader = DataLoader(val_set, batch_size=args.batch, num_workers=args.workers, - pin_memory=True, sampler=None) - - logger.info('build dataset done') - return train_loader, val_loader - - -def build_opt_lr(model, cfg, args, epoch): - trainable_params = model.features.param_groups(cfg['lr']['start_lr'], cfg['lr']['feature_lr_mult']) + \ - model.rpn_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult']) - - optimizer = torch.optim.SGD(trainable_params, args.lr, - momentum=args.momentum, - weight_decay=args.weight_decay) - - lr_scheduler = build_lr_scheduler(optimizer, cfg['lr'], epochs=args.epochs) - - lr_scheduler.step(epoch) - - return optimizer, lr_scheduler - - -def main(): - global args, best_acc, tb_writer, logger - args = parser.parse_args() - - init_log('global', logging.INFO) - - if args.log != "": - add_file_handler('global', args.log, logging.INFO) - - logger = logging.getLogger('global') - logger.info(args) - - cfg = load_config(args) - - logger.info("config \n{}".format(json.dumps(cfg, indent=4))) - - logger.info("\n" + collect_env_info()) - - if args.log_dir: - tb_writer = SummaryWriter(args.log_dir) - else: - tb_writer = Dummy() - - # build dataset - train_loader, val_loader = build_data_loader(cfg) - - if args.arch == 'Custom': - from custom import Custom - model = Custom(pretrain=True, anchors=cfg['anchors']) - else: - model = models.__dict__[args.arch](anchors=cfg['anchors']) - - logger.info(model) - - if args.pretrained: - model = load_pretrain(model, args.pretrained) - - model = model.cuda() - dist_model = torch.nn.DataParallel(model, list(range(torch.cuda.device_count()))).cuda() - - if args.resume and args.start_epoch != 0: - model.features.unfix((args.start_epoch - 1) / args.epochs) - - optimizer, lr_scheduler = build_opt_lr(model, cfg, args, args.start_epoch) - logger.info(lr_scheduler) - # optionally resume from a checkpoint - if args.resume: - assert os.path.isfile(args.resume), '{} is not a valid file'.format(args.resume) - model, optimizer, args.start_epoch, best_acc, arch = restore_from(model, optimizer, args.resume) - dist_model = torch.nn.DataParallel(model, list(range(torch.cuda.device_count()))).cuda() - epoch = args.start_epoch - if dist_model.module.features.unfix(epoch/args.epochs): - logger.info('unfix part model.') - optimizer, lr_scheduler = build_opt_lr(dist_model.module, cfg, args, epoch) - lr_scheduler.step(epoch) - cur_lr = lr_scheduler.get_cur_lr() - logger.info('epoch:{} resume lr {}'.format(epoch, cur_lr)) - - logger.info('model prepare done') - - train(train_loader, dist_model, optimizer, lr_scheduler, args.start_epoch, cfg) - - -def train(train_loader, model, optimizer, lr_scheduler, epoch, cfg): - global tb_index, best_acc, cur_lr - cur_lr = lr_scheduler.get_cur_lr() - logger = logging.getLogger('global') - avg = AverageMeter() - model.train() 
- model = model.cuda() - end = time.time() - - def is_valid_number(x): - return not(math.isnan(x) or math.isinf(x) or x > 1e4) - - num_per_epoch = len(train_loader.dataset) // args.epochs // args.batch - start_epoch = epoch - epoch = epoch - for iter, input in enumerate(train_loader): - # next epoch - if epoch != iter // num_per_epoch + start_epoch: - epoch = iter // num_per_epoch + start_epoch - - if not os.path.exists(args.save_dir): # makedir/save model - os.makedirs(args.save_dir) - - save_checkpoint({ - 'epoch': epoch, - 'arch': args.arch, - 'state_dict': model.module.state_dict(), - 'best_acc': best_acc, - 'optimizer': optimizer.state_dict(), - 'anchor_cfg': cfg['anchors'] - }, False, - os.path.join(args.save_dir, 'checkpoint_e%d.pth' % (epoch)), - os.path.join(args.save_dir, 'best.pth')) - - if epoch == args.epochs: - return - - if model.module.features.unfix(epoch/args.epochs): - logger.info('unfix part model.') - optimizer, lr_scheduler = build_opt_lr(model.module, cfg, args, epoch) - - lr_scheduler.step(epoch) - cur_lr = lr_scheduler.get_cur_lr() - - logger.info('epoch:{}'.format(epoch)) - - tb_index = iter - if iter % num_per_epoch == 0 and iter != 0: - for idx, pg in enumerate(optimizer.param_groups): - logger.info("epoch {} lr {}".format(epoch, pg['lr'])) - tb_writer.add_scalar('lr/group%d'%(idx+1), pg['lr'], tb_index) - - data_time = time.time() - end - avg.update(data_time=data_time) - x = { - 'cfg': cfg, - 'template': torch.autograd.Variable(input[0]).cuda(), - 'search': torch.autograd.Variable(input[1]).cuda(), - 'label_cls': torch.autograd.Variable(input[2]).cuda(), - 'label_loc': torch.autograd.Variable(input[3]).cuda(), - 'label_loc_weight': torch.autograd.Variable(input[4]).cuda(), - } - - optimizer.zero_grad() - outputs = model(x) - - rpn_cls_loss, rpn_loc_loss = outputs['losses'] - - rpn_cls_loss, rpn_loc_loss = torch.mean(rpn_cls_loss), torch.mean(rpn_loc_loss) - - cls_weight, reg_weight = cfg['loss']['weight'] - - loss = rpn_cls_loss * cls_weight + rpn_loc_loss * reg_weight - - loss.backward() - - if cfg['clip']['split']: - torch.nn.utils.clip_grad_norm_(model.module.features.parameters(), cfg['clip']['feature']) - torch.nn.utils.clip_grad_norm_(model.module.rpn_model.parameters(), cfg['clip']['rpn']) - else: - torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) # gradient clip - - siamrpn_loss = loss.item() - - if is_valid_number(siamrpn_loss): - optimizer.step() - - batch_time = time.time() - end - - avg.update(batch_time=batch_time, rpn_cls_loss=rpn_cls_loss, - rpn_loc_loss=rpn_loc_loss, siamrpn_loss=siamrpn_loss) - - tb_writer.add_scalar('loss/cls', rpn_cls_loss, tb_index) - tb_writer.add_scalar('loss/loc', rpn_loc_loss, tb_index) - - end = time.time() - - if (iter + 1) % args.print_freq == 0: - logger.info('Epoch: [{0}][{1}/{2}] lr: {lr:.6f}\t{batch_time:s}\t{data_time:s}' - '\t{rpn_cls_loss:s}\t{rpn_loc_loss:s}\t{siamrpn_loss:s}'.format( - epoch+1, (iter + 1) % num_per_epoch, num_per_epoch, lr=cur_lr, - batch_time=avg.batch_time, data_time=avg.data_time, rpn_cls_loss=avg.rpn_cls_loss, - rpn_loc_loss=avg.rpn_loc_loss, siamrpn_loss=avg.siamrpn_loss)) - print_speed(iter + 1, avg.batch_time.avg, args.epochs * num_per_epoch) - - -def save_checkpoint(state, is_best, filename='checkpoint.pth', best_file='model_best.pth'): - torch.save(state, filename) - if is_best: - shutil.copyfile(filename, best_file) - - -if __name__ == '__main__': - main() diff --git a/spaces/open-spaced-repetition/fsrs4anki_app/utilities.py 
b/spaces/open-spaced-repetition/fsrs4anki_app/utilities.py deleted file mode 100644 index 1516780b63bf0532ee605ff61d1479ca0f49e679..0000000000000000000000000000000000000000 --- a/spaces/open-spaced-repetition/fsrs4anki_app/utilities.py +++ /dev/null @@ -1,26 +0,0 @@ -from zipfile import ZipFile -import os -from pathlib import Path - - -# Extract the collection file or deck file to get the .anki21 database. -def extract(file, prefix): - proj_dir = Path( - f'projects/{prefix}_{file.orig_name.replace(".", "_").replace("@", "_")}' - ) - with ZipFile(file, "r") as zip_ref: - zip_ref.extractall(proj_dir) - # print(f"Extracted {file.orig_name} successfully!") - return proj_dir - - -def cleanup(proj_dir: Path, files): - """ - Delete all files in prefix that dont have filenames in files - :param proj_dir: - :param files: - :return: - """ - for file in proj_dir.glob("*"): - if file.name not in files: - os.remove(file) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/vq_diffusion.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/vq_diffusion.md deleted file mode 100644 index 5441d1d579ff2209b332243b3a086b057d1f4af4..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/vq_diffusion.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# VQ Diffusion - -[Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://huggingface.co/papers/2111.14822) is by Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, Baining Guo. - -The abstract from the paper is: - -*We present the vector quantized diffusion (VQ-Diffusion) model for text-to-image generation. This method is based on a vector quantized variational autoencoder (VQ-VAE) whose latent space is modeled by a conditional variant of the recently developed Denoising Diffusion Probabilistic Model (DDPM). We find that this latent-space method is well-suited for text-to-image generation tasks because it not only eliminates the unidirectional bias with existing methods but also allows us to incorporate a mask-and-replace diffusion strategy to avoid the accumulation of errors, which is a serious problem with existing methods. Our experiments show that the VQ-Diffusion produces significantly better text-to-image generation results when compared with conventional autoregressive (AR) models with similar numbers of parameters. Compared with previous GAN-based text-to-image methods, our VQ-Diffusion can handle more complex scenes and improve the synthesized image quality by a large margin. Finally, we show that the image generation computation in our method can be made highly efficient by reparameterization. With traditional AR methods, the text-to-image generation time increases linearly with the output image resolution and hence is quite time consuming even for normal size images. The VQ-Diffusion allows us to achieve a better trade-off between quality and speed. Our experiments indicate that the VQ-Diffusion model with the reparameterization is fifteen times faster than traditional AR methods while achieving a better image quality.* - -The original codebase can be found at [microsoft/VQ-Diffusion](https://github.com/microsoft/VQ-Diffusion). 
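-As a quick usage illustration (a minimal sketch, not taken from the paper or the original codebase: it assumes the publicly released `microsoft/vq-diffusion-ithq` checkpoint and a CUDA device), text-to-image generation with [`VQDiffusionPipeline`] looks roughly like this:
-
-```py
-from diffusers import VQDiffusionPipeline
-
-# minimal sketch; the checkpoint name below is an assumption
-pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
-pipeline = pipeline.to("cuda")
-
-# VQ-Diffusion denoises discrete latent codes, then decodes them with the VQ-VAE
-image = pipeline("teddy bear playing in the pool", num_inference_steps=100).images[0]
-image.save("teddy_bear.png")
-```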
- - - -Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## VQDiffusionPipeline -[[autodoc]] VQDiffusionPipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/diffedit.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/diffedit.md deleted file mode 100644 index 4c32eb4c482b86a86004c2870b79f307dc5553e5..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/diffedit.md +++ /dev/null @@ -1,262 +0,0 @@ -# DiffEdit - -[[open-in-colab]] - -Image editing typically requires providing a mask of the area to be edited. DiffEdit automatically generates the mask for you based on a text query, making it easier overall to create a mask without image editing software. The DiffEdit algorithm works in three steps: - -1. the diffusion model denoises an image conditioned on some query text and reference text which produces different noise estimates for different areas of the image; the difference is used to infer a mask to identify which area of the image needs to be changed to match the query text -2. the input image is encoded into latent space with DDIM -3. the latents are decoded with the diffusion model conditioned on the text query, using the mask as a guide such that pixels outside the mask remain the same as in the input image - -This guide will show you how to use DiffEdit to edit images without manually creating a mask. - -Before you begin, make sure you have the following libraries installed: - -```py -# uncomment to install the necessary libraries in Colab -#!pip install diffusers transformers accelerate safetensors -``` - -The [`StableDiffusionDiffEditPipeline`] requires an image mask and a set of partially inverted latents. The image mask is generated from the [`~StableDiffusionDiffEditPipeline.generate_mask`] function, and includes two parameters, `source_prompt` and `target_prompt`. These parameters determine what to edit in the image. For example, if you want to change a bowl of *fruits* to a bowl of *pears*, then: - -```py -source_prompt = "a bowl of fruits" -target_prompt = "a bowl of pears" -``` - -The partially inverted latents are generated from the [`~StableDiffusionDiffEditPipeline.invert`] function, and it is generally a good idea to include a `prompt` or *caption* describing the image to help guide the inverse latent sampling process. The caption can often be your `source_prompt`, but feel free to experiment with other text descriptions! 
- -Let's load the pipeline, scheduler, inverse scheduler, and enable some optimizations to reduce memory usage: - -```py -import torch -from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline - -pipeline = StableDiffusionDiffEditPipeline.from_pretrained( - "stabilityai/stable-diffusion-2-1", - torch_dtype=torch.float16, - safety_checker=None, - use_safetensors=True, -) -pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) -pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) -pipeline.enable_model_cpu_offload() -pipeline.enable_vae_slicing() -``` - -Load the image to edit: - -```py -from diffusers.utils import load_image - -img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" -raw_image = load_image(img_url).convert("RGB").resize((768, 768)) -``` - -Use the [`~StableDiffusionDiffEditPipeline.generate_mask`] function to generate the image mask. You'll need to pass it the `source_prompt` and `target_prompt` to specify what to edit in the image: - -```py -source_prompt = "a bowl of fruits" -target_prompt = "a basket of pears" -mask_image = pipeline.generate_mask( - image=raw_image, - source_prompt=source_prompt, - target_prompt=target_prompt, -) -``` - -Next, create the inverted latents and pass it a caption describing the image: - -```py -inv_latents = pipeline.invert(prompt=source_prompt, image=raw_image).latents -``` - -Finally, pass the image mask and inverted latents to the pipeline. The `target_prompt` becomes the `prompt` now, and the `source_prompt` is used as the `negative_prompt`: - -```py -image = pipeline( - prompt=target_prompt, - mask_image=mask_image, - image_latents=inv_latents, - negative_prompt=source_prompt, -).images[0] -image.save("edited_image.png") -``` - -
-<!-- figure: original image (left) vs. edited image (right) -->
    - -## Generate source and target embeddings - -The source and target embeddings can be automatically generated with the [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) model instead of creating them manually. - -Load the Flan-T5 model and tokenizer from the 🤗 Transformers library: - -```py -import torch -from transformers import AutoTokenizer, T5ForConditionalGeneration - -tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl") -model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", torch_dtype=torch.float16) -``` - -Provide some initial text to prompt the model to generate the source and target prompts. - -```py -source_concept = "bowl" -target_concept = "basket" - -source_text = f"Provide a caption for images containing a {source_concept}. " -"The captions should be in English and should be no longer than 150 characters." - -target_text = f"Provide a caption for images containing a {target_concept}. " -"The captions should be in English and should be no longer than 150 characters." -``` - -Next, create a utility function to generate the prompts: - -```py -@torch.no_grad -def generate_prompts(input_prompt): - input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda") - - outputs = model.generate( - input_ids, temperature=0.8, num_return_sequences=16, do_sample=True, max_new_tokens=128, top_k=10 - ) - return tokenizer.batch_decode(outputs, skip_special_tokens=True) - -source_prompts = generate_prompts(source_text) -target_prompts = generate_prompts(target_text) -print(source_prompts) -print(target_prompts) -``` - - - -Check out the [generation strategy](https://huggingface.co/docs/transformers/main/en/generation_strategies) guide if you're interested in learning more about strategies for generating different quality text. - - - -Load the text encoder model used by the [`StableDiffusionDiffEditPipeline`] to encode the text. 
You'll use the text encoder to compute the text embeddings: - -```py -import torch -from diffusers import StableDiffusionDiffEditPipeline - -pipeline = StableDiffusionDiffEditPipeline.from_pretrained( - "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, use_safetensors=True -).to("cuda") -pipeline.enable_model_cpu_offload() -pipeline.enable_vae_slicing() - -@torch.no_grad() -def embed_prompts(sentences, tokenizer, text_encoder, device="cuda"): - embeddings = [] - for sent in sentences: - text_inputs = tokenizer( - sent, - padding="max_length", - max_length=tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=None)[0] - embeddings.append(prompt_embeds) - return torch.concatenate(embeddings, dim=0).mean(dim=0).unsqueeze(0) - -source_embeds = embed_prompts(source_prompts, pipeline.tokenizer, pipeline.text_encoder) -target_embeds = embed_prompts(target_prompts, pipeline.tokenizer, pipeline.text_encoder) -``` - -Finally, pass the embeddings to the [`~StableDiffusionDiffEditPipeline.generate_mask`] and [`~StableDiffusionDiffEditPipeline.invert`] functions, and pipeline to generate the image: - -```diff - from diffusers import DDIMInverseScheduler, DDIMScheduler - from diffusers.utils import load_image - - pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) - pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) - - img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" - raw_image = load_image(img_url).convert("RGB").resize((768, 768)) - - - mask_image = pipeline.generate_mask( - image=raw_image, -+ source_prompt_embeds=source_embeds, -+ target_prompt_embeds=target_embeds, - ) - - inv_latents = pipeline.invert( -+ prompt_embeds=source_embeds, - image=raw_image, - ).latents - - images = pipeline( - mask_image=mask_image, - image_latents=inv_latents, -+ prompt_embeds=target_embeds, -+ negative_prompt_embeds=source_embeds, - ).images - images[0].save("edited_image.png") -``` - -## Generate a caption for inversion - -While you can use the `source_prompt` as a caption to help generate the partially inverted latents, you can also use the [BLIP](https://huggingface.co/docs/transformers/model_doc/blip) model to automatically generate a caption. 
- -Load the BLIP model and processor from the 🤗 Transformers library: - -```py -import torch -from transformers import BlipForConditionalGeneration, BlipProcessor - -processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") -model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16, low_cpu_mem_usage=True) -``` - -Create a utility function to generate a caption from the input image: - -```py -@torch.no_grad() -def generate_caption(images, caption_generator, caption_processor): - text = "a photograph of" - - inputs = caption_processor(images, text, return_tensors="pt").to(device="cuda", dtype=caption_generator.dtype) - caption_generator.to("cuda") - outputs = caption_generator.generate(**inputs, max_new_tokens=128) - - # offload caption generator - caption_generator.to("cpu") - - caption = caption_processor.batch_decode(outputs, skip_special_tokens=True)[0] - return caption -``` - -Load an input image and generate a caption for it using the `generate_caption` function: - -```py -from diffusers.utils import load_image - -img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" -raw_image = load_image(img_url).convert("RGB").resize((768, 768)) -caption = generate_caption(raw_image, model, processor) -``` - -
-<!-- figure: input image, generated caption: "a photograph of a bowl of fruit on a table" -->
    - -Now you can drop the caption into the [`~StableDiffusionDiffEditPipeline.invert`] function to generate the partially inverted latents! \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/unconditional_image_generation.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/unconditional_image_generation.md deleted file mode 100644 index 3893f7cce276533682ddc7e1418fc8dad95fdb5b..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/unconditional_image_generation.md +++ /dev/null @@ -1,69 +0,0 @@ - - -# Unconditional image generation - -[[open-in-colab]] - -Unconditional image generation is a relatively straightforward task. The model only generates images - without any additional context like text or an image - resembling the training data it was trained on. - -The [`DiffusionPipeline`] is the easiest way to use a pre-trained diffusion system for inference. - -Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. -You can use any of the 🧨 Diffusers [checkpoints](https://huggingface.co/models?library=diffusers&sort=downloads) from the Hub (the checkpoint you'll use generates images of butterflies). - - - -💡 Want to train your own unconditional image generation model? Take a look at the training [guide](training/unconditional_training) to learn how to generate your own images. - - - -In this guide, you'll use [`DiffusionPipeline`] for unconditional image generation with [DDPM](https://arxiv.org/abs/2006.11239): - -```python ->>> from diffusers import DiffusionPipeline - ->>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128", use_safetensors=True) -``` - -The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. -Because the model consists of roughly 1.4 billion parameters, we strongly recommend running it on a GPU. -You can move the generator object to a GPU, just like you would in PyTorch: - -```python ->>> generator.to("cuda") -``` - -Now you can use the `generator` to generate an image: - -```python ->>> image = generator().images[0] -``` - -The output is by default wrapped into a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object. - -You can save the image by calling: - -```python ->>> image.save("generated_image.png") -``` - -Try out the Spaces below, and feel free to play around with the inference steps parameter to see how it affects the image quality! - - - - diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/text_to_image/train_text_to_image.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/text_to_image/train_text_to_image.py deleted file mode 100644 index 0d14e6ccd548df5392836c1dcc2a81ca417e8e7f..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/text_to_image/train_text_to_image.py +++ /dev/null @@ -1,1091 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -import argparse -import logging -import math -import os -import random -import shutil -from pathlib import Path - -import accelerate -import datasets -import numpy as np -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -import transformers -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.state import AcceleratorState -from accelerate.utils import ProjectConfiguration, set_seed -from datasets import load_dataset -from huggingface_hub import create_repo, upload_folder -from packaging import version -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer -from transformers.utils import ContextManagers - -import diffusers -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid -from diffusers.utils.import_utils import is_xformers_available - - -if is_wandb_available(): - import wandb - - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. -check_min_version("0.22.0.dev0") - -logger = get_logger(__name__, log_level="INFO") - -DATASET_NAME_MAPPING = { - "lambdalabs/pokemon-blip-captions": ("image", "text"), -} - - -def save_model_card( - args, - repo_id: str, - images=None, - repo_folder=None, -): - img_str = "" - if len(images) > 0: - image_grid = make_image_grid(images, 1, len(args.validation_prompts)) - image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) - img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" - - yaml = f""" ---- -license: creativeml-openrail-m -base_model: {args.pretrained_model_name_or_path} -datasets: -- {args.dataset_name} -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -inference: true ---- - """ - model_card = f""" -# Text-to-image finetuning - {repo_id} - -This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n -{img_str} - -## Pipeline usage - -You can use the pipeline like so: - -```python -from diffusers import DiffusionPipeline -import torch - -pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16) -prompt = "{args.validation_prompts[0]}" -image = pipeline(prompt).images[0] -image.save("my_image.png") -``` - -## Training info - -These are the key hyperparameters used during training: - -* Epochs: {args.num_train_epochs} -* Learning rate: {args.learning_rate} -* Batch size: {args.train_batch_size} -* Gradient accumulation steps: {args.gradient_accumulation_steps} -* Image resolution: {args.resolution} -* Mixed-precision: {args.mixed_precision} - -""" - wandb_info = "" - if is_wandb_available(): - wandb_run_url = None - if wandb.run is not None: - wandb_run_url = wandb.run.url - - if wandb_run_url is not None: - wandb_info = f""" -More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). -""" - - model_card += wandb_info - - with open(os.path.join(repo_folder, "README.md"), "w") as f: - f.write(yaml + model_card) - - -def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): - logger.info("Running validation... ") - - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - vae=accelerator.unwrap_model(vae), - text_encoder=accelerator.unwrap_model(text_encoder), - tokenizer=tokenizer, - unet=accelerator.unwrap_model(unet), - safety_checker=None, - revision=args.revision, - torch_dtype=weight_dtype, - ) - pipeline = pipeline.to(accelerator.device) - pipeline.set_progress_bar_config(disable=True) - - if args.enable_xformers_memory_efficient_attention: - pipeline.enable_xformers_memory_efficient_attention() - - if args.seed is None: - generator = None - else: - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) - - images = [] - for i in range(len(args.validation_prompts)): - with torch.autocast("cuda"): - image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] - - images.append(image) - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - np_images = np.stack([np.asarray(img) for img in images]) - tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") - elif tracker.name == "wandb": - tracker.log( - { - "validation": [ - wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") - for i, image in enumerate(images) - ] - } - ) - else: - logger.warn(f"image logging not implemented for {tracker.name}") - - del pipeline - torch.cuda.empty_cache() - - return images - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." 
- ) - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--dataset_name", - type=str, - default=None, - help=( - "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," - " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," - " or to a folder containing files that 🤗 Datasets can understand." - ), - ) - parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The config of the Dataset, leave as None if there's only one config.", - ) - parser.add_argument( - "--train_data_dir", - type=str, - default=None, - help=( - "A folder containing the training data. Folder contents must follow the structure described in" - " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" - " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." - ), - ) - parser.add_argument( - "--image_column", type=str, default="image", help="The column of the dataset containing an image." - ) - parser.add_argument( - "--caption_column", - type=str, - default="text", - help="The column of the dataset containing a caption or a list of captions.", - ) - parser.add_argument( - "--max_train_samples", - type=int, - default=None, - help=( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ), - ) - parser.add_argument( - "--validation_prompts", - type=str, - default=None, - nargs="+", - help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), - ) - parser.add_argument( - "--output_dir", - type=str, - default="sd-model-finetuned", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--cache_dir", - type=str, - default=None, - help="The directory where the downloaded models and datasets will be stored.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", - default=False, - action="store_true", - help=( - "Whether to center crop the input images to the resolution. If not set, the images will be randomly" - " cropped. The images will be resized to the resolution first before cropping." - ), - ) - parser.add_argument( - "--random_flip", - action="store_true", - help="whether to randomly flip images horizontally", - ) - parser.add_argument( - "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument("--num_train_epochs", type=int, default=100) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=1e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--snr_gamma", - type=float, - default=None, - help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " - "More details here: https://arxiv.org/abs/2303.09556.", - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." - ) - parser.add_argument( - "--allow_tf32", - action="store_true", - help=( - "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" - " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" - ), - ) - parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") - parser.add_argument( - "--non_ema_revision", - type=str, - default=None, - required=False, - help=( - "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" - " remote repository specified with --pretrained_model_name_or_path." - ), - ) - parser.add_argument( - "--dataloader_num_workers", - type=int, - default=0, - help=( - "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." - ), - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--prediction_type", - type=str, - default=None, - help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. 
If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.", - ) - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default=None, - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" - " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' - ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument( - "--checkpointing_steps", - type=int, - default=500, - help=( - "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" - " training using `--resume_from_checkpoint`." - ), - ) - parser.add_argument( - "--checkpoints_total_limit", - type=int, - default=None, - help=("Max number of checkpoints to store."), - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help=( - "Whether training should be resumed from a previous checkpoint. Use a path saved by" - ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' - ), - ) - parser.add_argument( - "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." - ) - parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") - parser.add_argument( - "--validation_epochs", - type=int, - default=5, - help="Run validation every X epochs.", - ) - parser.add_argument( - "--tracker_project_name", - type=str, - default="text2image-fine-tune", - help=( - "The `project_name` argument passed to Accelerator.init_trackers for" - " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" - ), - ) - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - # Sanity checks - if args.dataset_name is None and args.train_data_dir is None: - raise ValueError("Need either a dataset name or a training folder.") - - # default to using the same revision for the non-ema model if not specified - if args.non_ema_revision is None: - args.non_ema_revision = args.revision - - return args - - -def main(): - args = parse_args() - - if args.non_ema_revision is not None: - deprecate( - "non_ema_revision!=None", - "0.15.0", - message=( - "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" - " use `--variant=non_ema` instead." 
- ), - ) - logging_dir = os.path.join(args.output_dir, args.logging_dir) - - accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with=args.report_to, - project_config=accelerator_project_config, - ) - - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state, main_process_only=False) - if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_warning() - diffusers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - diffusers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token - ).repo_id - - # Load scheduler, tokenizer and models. - noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") - tokenizer = CLIPTokenizer.from_pretrained( - args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision - ) - - def deepspeed_zero_init_disabled_context_manager(): - """ - returns either a context list that includes one that will disable zero.Init or an empty context list - """ - deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None - if deepspeed_plugin is None: - return [] - - return [deepspeed_plugin.zero3_init_context_manager(enable=False)] - - # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. - # For this to work properly all models must be run through `accelerate.prepare`. But accelerate - # will try to assign the same optimizer with the same weights to all models during - # `deepspeed.initialize`, which of course doesn't work. - # - # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 - # frozen models from being partitioned during `zero.Init` which gets called during - # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding - # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. - with ContextManagers(deepspeed_zero_init_disabled_context_manager()): - text_encoder = CLIPTextModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - vae = AutoencoderKL.from_pretrained( - args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision - ) - - unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision - ) - - # Freeze vae and text_encoder - vae.requires_grad_(False) - text_encoder.requires_grad_(False) - - # Create EMA for the unet. 
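-    # The EMA copy below is an exponential moving average of the UNet weights; it is not optimized
-    # directly and is only swapped in for validation and the final export when --use_ema is set.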
- if args.use_ema: - ema_unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision - ) - ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) - - if args.enable_xformers_memory_efficient_attention: - if is_xformers_available(): - import xformers - - xformers_version = version.parse(xformers.__version__) - if xformers_version == version.parse("0.0.16"): - logger.warn( - "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." - ) - unet.enable_xformers_memory_efficient_attention() - else: - raise ValueError("xformers is not available. Make sure it is installed correctly") - - def compute_snr(timesteps): - """ - Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 - """ - alphas_cumprod = noise_scheduler.alphas_cumprod - sqrt_alphas_cumprod = alphas_cumprod**0.5 - sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 - - # Expand the tensors. - # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026 - sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() - while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): - sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] - alpha = sqrt_alphas_cumprod.expand(timesteps.shape) - - sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() - while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): - sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] - sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) - - # Compute SNR. 
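-        # With alpha = sqrt(alphas_cumprod[t]) and sigma = sqrt(1 - alphas_cumprod[t]),
-        # this is alphas_cumprod[t] / (1 - alphas_cumprod[t]).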
- snr = (alpha / sigma) ** 2 - return snr - - # `accelerate` 0.16.0 will have better support for customized saving - if version.parse(accelerate.__version__) >= version.parse("0.16.0"): - # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format - def save_model_hook(models, weights, output_dir): - if accelerator.is_main_process: - if args.use_ema: - ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) - - for i, model in enumerate(models): - model.save_pretrained(os.path.join(output_dir, "unet")) - - # make sure to pop weight so that corresponding model is not saved again - weights.pop() - - def load_model_hook(models, input_dir): - if args.use_ema: - load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) - ema_unet.load_state_dict(load_model.state_dict()) - ema_unet.to(accelerator.device) - del load_model - - for i in range(len(models)): - # pop models so that they are not loaded again - model = models.pop() - - # load diffusers style into model - load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") - model.register_to_config(**load_model.config) - - model.load_state_dict(load_model.state_dict()) - del load_model - - accelerator.register_save_state_pre_hook(save_model_hook) - accelerator.register_load_state_pre_hook(load_model_hook) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - - # Enable TF32 for faster training on Ampere GPUs, - # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices - if args.allow_tf32: - torch.backends.cuda.matmul.allow_tf32 = True - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Initialize the optimizer - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" - ) - - optimizer_cls = bnb.optim.AdamW8bit - else: - optimizer_cls = torch.optim.AdamW - - optimizer = optimizer_cls( - unet.parameters(), - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - # Get the datasets: you can either provide your own training and evaluation files (see below) - # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). - - # In distributed training, the load_dataset function guarantees that only one local process can concurrently - # download the dataset. - if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - dataset = load_dataset( - args.dataset_name, - args.dataset_config_name, - cache_dir=args.cache_dir, - data_dir=args.train_data_dir, - ) - else: - data_files = {} - if args.train_data_dir is not None: - data_files["train"] = os.path.join(args.train_data_dir, "**") - dataset = load_dataset( - "imagefolder", - data_files=data_files, - cache_dir=args.cache_dir, - ) - # See more about loading custom images at - # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - column_names = dataset["train"].column_names - - # 6. Get the column names for input/target. 
- dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) - if args.image_column is None: - image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] - else: - image_column = args.image_column - if image_column not in column_names: - raise ValueError( - f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" - ) - if args.caption_column is None: - caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] - else: - caption_column = args.caption_column - if caption_column not in column_names: - raise ValueError( - f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" - ) - - # Preprocessing the datasets. - # We need to tokenize input captions and transform the images. - def tokenize_captions(examples, is_train=True): - captions = [] - for caption in examples[caption_column]: - if isinstance(caption, str): - captions.append(caption) - elif isinstance(caption, (list, np.ndarray)): - # take a random caption if there are multiple - captions.append(random.choice(caption) if is_train else caption[0]) - else: - raise ValueError( - f"Caption column `{caption_column}` should contain either strings or lists of strings." - ) - inputs = tokenizer( - captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" - ) - return inputs.input_ids - - # Preprocessing the datasets. - train_transforms = transforms.Compose( - [ - transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), - transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def preprocess_train(examples): - images = [image.convert("RGB") for image in examples[image_column]] - examples["pixel_values"] = [train_transforms(image) for image in images] - examples["input_ids"] = tokenize_captions(examples) - return examples - - with accelerator.main_process_first(): - if args.max_train_samples is not None: - dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) - # Set the training transforms - train_dataset = dataset["train"].with_transform(preprocess_train) - - def collate_fn(examples): - pixel_values = torch.stack([example["pixel_values"] for example in examples]) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - input_ids = torch.stack([example["input_ids"] for example in examples]) - return {"pixel_values": pixel_values, "input_ids": input_ids} - - # DataLoaders creation: - train_dataloader = torch.utils.data.DataLoader( - train_dataset, - shuffle=True, - collate_fn=collate_fn, - batch_size=args.train_batch_size, - num_workers=args.dataloader_num_workers, - ) - - # Scheduler and math around the number of training steps. 
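-    # If --max_train_steps is not given, it is derived from num_train_epochs below; the flag records
-    # that, so both values can be recomputed after `accelerator.prepare` may change the dataloader length.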
- overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, - num_training_steps=args.max_train_steps * accelerator.num_processes, - ) - - # Prepare everything with our `accelerator`. - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - if args.use_ema: - ema_unet.to(accelerator.device) - - # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision - # as these weights are only used for inference, keeping weights in full precision is not required. - weight_dtype = torch.float32 - if accelerator.mixed_precision == "fp16": - weight_dtype = torch.float16 - args.mixed_precision = accelerator.mixed_precision - elif accelerator.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - args.mixed_precision = accelerator.mixed_precision - - # Move text_encode and vae to gpu and cast to weight_dtype - text_encoder.to(accelerator.device, dtype=weight_dtype) - vae.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - tracker_config = dict(vars(args)) - tracker_config.pop("validation_prompts") - accelerator.init_trackers(args.tracker_project_name, tracker_config) - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - global_step = 0 - first_epoch = 0 - - # Potentially load in the weights and states from a previous save - if args.resume_from_checkpoint: - if args.resume_from_checkpoint != "latest": - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the most recent checkpoint - dirs = os.listdir(args.output_dir) - dirs = [d for d in dirs if d.startswith("checkpoint")] - dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) - path = dirs[-1] if len(dirs) > 0 else None - - if path is None: - accelerator.print( - f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
- ) - args.resume_from_checkpoint = None - else: - accelerator.print(f"Resuming from checkpoint {path}") - accelerator.load_state(os.path.join(args.output_dir, path)) - global_step = int(path.split("-")[1]) - - resume_global_step = global_step * args.gradient_accumulation_steps - first_epoch = global_step // num_update_steps_per_epoch - resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) - - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) - progress_bar.set_description("Steps") - - for epoch in range(first_epoch, args.num_train_epochs): - unet.train() - train_loss = 0.0 - for step, batch in enumerate(train_dataloader): - # Skip steps until we reach the resumed step - if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: - if step % args.gradient_accumulation_steps == 0: - progress_bar.update(1) - continue - - with accelerator.accumulate(unet): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() - latents = latents * vae.config.scaling_factor - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - if args.noise_offset: - # https://www.crosslabs.org//blog/diffusion-with-offset-noise - noise += args.noise_offset * torch.randn( - (latents.shape[0], latents.shape[1], 1, 1), device=latents.device - ) - if args.input_perturbation: - new_noise = noise + args.input_perturbation * torch.randn_like(noise) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - if args.input_perturbation: - noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) - else: - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Get the target for loss depending on the prediction type - if args.prediction_type is not None: - # set prediction_type of scheduler if defined - noise_scheduler.register_to_config(prediction_type=args.prediction_type) - - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - # Predict the noise residual and compute loss - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - if args.snr_gamma is None: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - else: - # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. - # Since we predict the noise instead of x_0, the original formulation is slightly changed. - # This is discussed in Section 4.2 of the same paper. - snr = compute_snr(timesteps) - mse_loss_weights = ( - torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr - ) - if noise_scheduler.config.prediction_type == "v_prediction": - # velocity objective prediction requires SNR weights to be floored to a min value of 1. 
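-                        # min(SNR, gamma) / SNR is at most 1, so adding 1 keeps the v-prediction weight >= 1.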
- mse_loss_weights = mse_loss_weights + 1 - # We first calculate the original loss. Then we mean over the non-batch dimensions and - # rebalance the sample-wise losses with their respective loss weights. - # Finally, we take the mean of the rebalanced loss. - loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") - loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights - loss = loss.mean() - - # Gather the losses across all processes for logging (if we use distributed training). - avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() - train_loss += avg_loss.item() / args.gradient_accumulation_steps - - # Backpropagate - accelerator.backward(loss) - if accelerator.sync_gradients: - accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - if args.use_ema: - ema_unet.step(unet.parameters()) - progress_bar.update(1) - global_step += 1 - accelerator.log({"train_loss": train_loss}, step=global_step) - train_loss = 0.0 - - if global_step % args.checkpointing_steps == 0: - if accelerator.is_main_process: - # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` - if args.checkpoints_total_limit is not None: - checkpoints = os.listdir(args.output_dir) - checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] - checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) - - # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints - if len(checkpoints) >= args.checkpoints_total_limit: - num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 - removing_checkpoints = checkpoints[0:num_to_remove] - - logger.info( - f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" - ) - logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") - - for removing_checkpoint in removing_checkpoints: - removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) - shutil.rmtree(removing_checkpoint) - - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - accelerator.save_state(save_path) - logger.info(f"Saved state to {save_path}") - - logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - - if global_step >= args.max_train_steps: - break - - if accelerator.is_main_process: - if args.validation_prompts is not None and epoch % args.validation_epochs == 0: - if args.use_ema: - # Store the UNet parameters temporarily and load the EMA parameters to perform inference. - ema_unet.store(unet.parameters()) - ema_unet.copy_to(unet.parameters()) - log_validation( - vae, - text_encoder, - tokenizer, - unet, - args, - accelerator, - weight_dtype, - global_step, - ) - if args.use_ema: - # Switch back to the original UNet parameters. - ema_unet.restore(unet.parameters()) - - # Create the pipeline using the trained modules and save it. 
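-    # All processes synchronize here; only the main process assembles the final StableDiffusionPipeline
-    # from the trained UNet (optionally with EMA weights) and saves it to --output_dir.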
- accelerator.wait_for_everyone() - if accelerator.is_main_process: - unet = accelerator.unwrap_model(unet) - if args.use_ema: - ema_unet.copy_to(unet.parameters()) - - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - text_encoder=text_encoder, - vae=vae, - unet=unet, - revision=args.revision, - ) - pipeline.save_pretrained(args.output_dir) - - # Run a final round of inference. - images = [] - if args.validation_prompts is not None: - logger.info("Running inference for collecting generated images...") - pipeline = pipeline.to(accelerator.device) - pipeline.torch_dtype = weight_dtype - pipeline.set_progress_bar_config(disable=True) - - if args.enable_xformers_memory_efficient_attention: - pipeline.enable_xformers_memory_efficient_attention() - - if args.seed is None: - generator = None - else: - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) - - for i in range(len(args.validation_prompts)): - with torch.autocast("cuda"): - image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] - images.append(image) - - if args.push_to_hub: - save_model_card(args, repo_id, images, repo_folder=args.output_dir) - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - accelerator.end_training() - - -if __name__ == "__main__": - main() diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py deleted file mode 100644 index 85c47f13b3005b62799248e49dd961b30aa81fa6..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +++ /dev/null @@ -1,1191 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import inspect -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import PIL.Image -import torch -import torch.nn.functional as F -from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer - -from diffusers.utils.import_utils import is_invisible_watermark_available - -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel -from ...models.attention_processor import ( - AttnProcessor2_0, - LoRAAttnProcessor2_0, - LoRAXFormersAttnProcessor, - XFormersAttnProcessor, -) -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - logging, - replace_example_docstring, -) -from ...utils.torch_utils import is_compiled_module, randn_tensor -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion_xl import StableDiffusionXLPipelineOutput - - -if is_invisible_watermark_available(): - from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker - -from .multicontrolnet import MultiControlNetModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> # !pip install opencv-python transformers accelerate - >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL - >>> from diffusers.utils import load_image - >>> import numpy as np - >>> import torch - - >>> import cv2 - >>> from PIL import Image - - >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" - >>> negative_prompt = "low quality, bad quality, sketches" - - >>> # download an image - >>> image = load_image( - ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" - ... ) - - >>> # initialize the models and pipeline - >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization - >>> controlnet = ControlNetModel.from_pretrained( - ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 - ... ) - >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) - >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained( - ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 - ... ) - >>> pipe.enable_model_cpu_offload() - - >>> # get canny image - >>> image = np.array(image) - >>> image = cv2.Canny(image, 100, 200) - >>> image = image[:, :, None] - >>> image = np.concatenate([image, image, image], axis=2) - >>> canny_image = Image.fromarray(image) - - >>> # generate image - >>> image = pipe( - ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image - ... ).images[0] - ``` -""" - - -class StableDiffusionXLControlNetPipeline( - DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin -): - r""" - Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
- - The pipeline also inherits the following loading methods: - - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - - [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. - text_encoder ([`~transformers.CLIPTextModel`]): - Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). - text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): - Second frozen text-encoder - ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). - tokenizer ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. - tokenizer_2 ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. - unet ([`UNet2DConditionModel`]): - A `UNet2DConditionModel` to denoise the encoded image latents. - controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): - Provides additional conditioning to the `unet` during the denoising process. If you set multiple - ControlNets as a list, the outputs from each ControlNet are added together to create one combined - additional conditioning. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): - Whether the negative prompt embeddings should always be set to 0. Also see the config of - `stabilityai/stable-diffusion-xl-base-1-0`. - add_watermarker (`bool`, *optional*): - Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to - watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no - watermarker is used. 
- """ - model_cpu_offload_seq = ( - "text_encoder->text_encoder_2->unet->vae" # leave controlnet out on purpose because it iterates with unet - ) - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - text_encoder_2: CLIPTextModelWithProjection, - tokenizer: CLIPTokenizer, - tokenizer_2: CLIPTokenizer, - unet: UNet2DConditionModel, - controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], - scheduler: KarrasDiffusionSchedulers, - force_zeros_for_empty_prompt: bool = True, - add_watermarker: Optional[bool] = None, - ): - super().__init__() - - if isinstance(controlnet, (list, tuple)): - controlnet = MultiControlNetModel(controlnet) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - text_encoder_2=text_encoder_2, - tokenizer=tokenizer, - tokenizer_2=tokenizer_2, - unet=unet, - controlnet=controlnet, - scheduler=scheduler, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) - self.control_image_processor = VaeImageProcessor( - vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False - ) - add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() - - if add_watermarker: - self.watermark = StableDiffusionXLWatermarker() - else: - self.watermark = None - - self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling - def enable_vae_tiling(self): - r""" - Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to - compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow - processing larger images. - """ - self.vae.enable_tiling() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling - def disable_vae_tiling(self): - r""" - Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to - computing decoding in one step. 
- """ - self.vae.disable_tiling() - - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt - def encode_prompt( - self, - prompt: str, - prompt_2: Optional[str] = None, - device: Optional[torch.device] = None, - num_images_per_prompt: int = 1, - do_classifier_free_guidance: bool = True, - negative_prompt: Optional[str] = None, - negative_prompt_2: Optional[str] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - pooled_prompt_embeds: Optional[torch.FloatTensor] = None, - negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - clip_skip: Optional[int] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is - used in both text-encoders - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - negative_prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and - `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - pooled_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. - If not provided, pooled text embeddings will be generated from `prompt` input argument. - negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` - input argument. - lora_scale (`float`, *optional*): - A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. - clip_skip (`int`, *optional*): - Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that - the output of the pre-final layer will be used for computing the prompt embeddings. 
- """ - device = device or self._execution_device - - # set lora scale so that monkey patched LoRA - # function of text encoder can correctly access it - if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): - self._lora_scale = lora_scale - - # dynamically adjust the LoRA scale - adjust_lora_scale_text_encoder(self.text_encoder, lora_scale, self.use_peft_backend) - adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale, self.use_peft_backend) - - prompt = [prompt] if isinstance(prompt, str) else prompt - - if prompt is not None: - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - # Define tokenizers and text encoders - tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] - text_encoders = ( - [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] - ) - - if prompt_embeds is None: - prompt_2 = prompt_2 or prompt - prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 - - # textual inversion: procecss multi-vector tokens if necessary - prompt_embeds_list = [] - prompts = [prompt, prompt_2] - for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, tokenizer) - - text_inputs = tokenizer( - prompt, - padding="max_length", - max_length=tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - - text_input_ids = text_inputs.input_ids - untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {tokenizer.model_max_length} tokens: {removed_text}" - ) - - prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) - - # We are only ALWAYS interested in the pooled output of the final text encoder - pooled_prompt_embeds = prompt_embeds[0] - if clip_skip is None: - prompt_embeds = prompt_embeds.hidden_states[-2] - else: - # "2" because SDXL always indexes from the penultimate layer. 
- prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] - - prompt_embeds_list.append(prompt_embeds) - - prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) - - # get unconditional embeddings for classifier free guidance - zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt - if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: - negative_prompt_embeds = torch.zeros_like(prompt_embeds) - negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) - elif do_classifier_free_guidance and negative_prompt_embeds is None: - negative_prompt = negative_prompt or "" - negative_prompt_2 = negative_prompt_2 or negative_prompt - - # normalize str to list - negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt - negative_prompt_2 = ( - batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 - ) - - uncond_tokens: List[str] - if prompt is not None and type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = [negative_prompt, negative_prompt_2] - - negative_prompt_embeds_list = [] - for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): - if isinstance(self, TextualInversionLoaderMixin): - negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = tokenizer( - negative_prompt, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - negative_prompt_embeds = text_encoder( - uncond_input.input_ids.to(device), - output_hidden_states=True, - ) - # We are only ALWAYS interested in the pooled output of the final text encoder - negative_pooled_prompt_embeds = negative_prompt_embeds[0] - negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] - - negative_prompt_embeds_list.append(negative_prompt_embeds) - - negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( - bs_embed * num_images_per_prompt, -1 - ) - if do_classifier_free_guidance: - negative_pooled_prompt_embeds = 
negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( - bs_embed * num_images_per_prompt, -1 - ) - - return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, - prompt, - prompt_2, - image, - callback_steps, - negative_prompt=None, - negative_prompt_2=None, - prompt_embeds=None, - negative_prompt_embeds=None, - pooled_prompt_embeds=None, - negative_pooled_prompt_embeds=None, - controlnet_conditioning_scale=1.0, - control_guidance_start=0.0, - control_guidance_end=1.0, - ): - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt_2 is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): - raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - elif negative_prompt_2 is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
- ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - if prompt_embeds is not None and pooled_prompt_embeds is None: - raise ValueError( - "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." - ) - - if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: - raise ValueError( - "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." - ) - - # `prompt` needs more sophisticated handling when there are multiple - # conditionings. - if isinstance(self.controlnet, MultiControlNetModel): - if isinstance(prompt, list): - logger.warning( - f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" - " prompts. The conditionings will be fixed across the prompts." - ) - - # Check `image` - is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( - self.controlnet, torch._dynamo.eval_frame.OptimizedModule - ) - if ( - isinstance(self.controlnet, ControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, ControlNetModel) - ): - self.check_image(image, prompt, prompt_embeds) - elif ( - isinstance(self.controlnet, MultiControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, MultiControlNetModel) - ): - if not isinstance(image, list): - raise TypeError("For multiple controlnets: `image` must be type `list`") - - # When `image` is a nested list: - # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) - elif any(isinstance(i, list) for i in image): - raise ValueError("A single batch of multiple conditionings are supported at the moment.") - elif len(image) != len(self.controlnet.nets): - raise ValueError( - f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
- ) - - for image_ in image: - self.check_image(image_, prompt, prompt_embeds) - else: - assert False - - # Check `controlnet_conditioning_scale` - if ( - isinstance(self.controlnet, ControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, ControlNetModel) - ): - if not isinstance(controlnet_conditioning_scale, float): - raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") - elif ( - isinstance(self.controlnet, MultiControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, MultiControlNetModel) - ): - if isinstance(controlnet_conditioning_scale, list): - if any(isinstance(i, list) for i in controlnet_conditioning_scale): - raise ValueError("A single batch of multiple conditionings are supported at the moment.") - elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( - self.controlnet.nets - ): - raise ValueError( - "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" - " the same length as the number of controlnets" - ) - else: - assert False - - if not isinstance(control_guidance_start, (tuple, list)): - control_guidance_start = [control_guidance_start] - - if not isinstance(control_guidance_end, (tuple, list)): - control_guidance_end = [control_guidance_end] - - if len(control_guidance_start) != len(control_guidance_end): - raise ValueError( - f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." - ) - - if isinstance(self.controlnet, MultiControlNetModel): - if len(control_guidance_start) != len(self.controlnet.nets): - raise ValueError( - f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." - ) - - for start, end in zip(control_guidance_start, control_guidance_end): - if start >= end: - raise ValueError( - f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
- ) - if start < 0.0: - raise ValueError(f"control guidance start: {start} can't be smaller than 0.") - if end > 1.0: - raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") - - # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image - def check_image(self, image, prompt, prompt_embeds): - image_is_pil = isinstance(image, PIL.Image.Image) - image_is_tensor = isinstance(image, torch.Tensor) - image_is_np = isinstance(image, np.ndarray) - image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) - image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) - image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) - - if ( - not image_is_pil - and not image_is_tensor - and not image_is_np - and not image_is_pil_list - and not image_is_tensor_list - and not image_is_np_list - ): - raise TypeError( - f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" - ) - - if image_is_pil: - image_batch_size = 1 - else: - image_batch_size = len(image) - - if prompt is not None and isinstance(prompt, str): - prompt_batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - prompt_batch_size = len(prompt) - elif prompt_embeds is not None: - prompt_batch_size = prompt_embeds.shape[0] - - if image_batch_size != 1 and image_batch_size != prompt_batch_size: - raise ValueError( - f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" - ) - - # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image - def prepare_image( - self, - image, - width, - height, - batch_size, - num_images_per_prompt, - device, - dtype, - do_classifier_free_guidance=False, - guess_mode=False, - ): - image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) - image_batch_size = image.shape[0] - - if image_batch_size == 1: - repeat_by = batch_size - else: - # image batch size is the same as prompt batch size - repeat_by = num_images_per_prompt - - image = image.repeat_interleave(repeat_by, dim=0) - - image = image.to(device=device, dtype=dtype) - - if do_classifier_free_guidance and not guess_mode: - image = torch.cat([image] * 2) - - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids - def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): - add_time_ids = list(original_size + crops_coords_top_left + target_size) - - passed_add_embed_dim = ( - self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim - ) - expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features - - if expected_add_embed_dim != passed_add_embed_dim: - raise ValueError( - f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." - ) - - add_time_ids = torch.tensor([add_time_ids], dtype=dtype) - return add_time_ids - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae - def upcast_vae(self): - dtype = self.vae.dtype - self.vae.to(dtype=torch.float32) - use_torch_2_0_or_xformers = isinstance( - self.vae.decoder.mid_block.attentions[0].processor, - ( - AttnProcessor2_0, - XFormersAttnProcessor, - LoRAXFormersAttnProcessor, - LoRAAttnProcessor2_0, - ), - ) - # if xformers or torch_2_0 is used attention block does not need - # to be in float32 which can save lots of memory - if use_torch_2_0_or_xformers: - self.vae.post_quant_conv.to(dtype) - self.vae.decoder.conv_in.to(dtype) - self.vae.decoder.mid_block.to(dtype) - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - prompt_2: Optional[Union[str, List[str]]] = None, - image: PipelineImageInput = None, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 5.0, - negative_prompt: Optional[Union[str, List[str]]] = None, - negative_prompt_2: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - pooled_prompt_embeds: Optional[torch.FloatTensor] = None, - negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - controlnet_conditioning_scale: Union[float, List[float]] = 1.0, - guess_mode: bool = False, - control_guidance_start: Union[float, List[float]] = 0.0, - control_guidance_end: Union[float, List[float]] = 1.0, - original_size: Tuple[int, int] = None, - crops_coords_top_left: Tuple[int, int] = (0, 0), - target_size: Tuple[int, int] = None, - negative_original_size: Optional[Tuple[int, int]] = None, - negative_crops_coords_top_left: Tuple[int, int] = (0, 0), - negative_target_size: Optional[Tuple[int, 
int]] = None, - clip_skip: Optional[int] = None, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. - prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is - used in both text-encoders. - image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: - `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): - The ControlNet input condition to provide guidance to the `unet` for generation. If the type is - specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be - accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height - and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in - `init`, images must be passed as a list such that each element of the list can be correctly batched for - input to a single ControlNet. - height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. Anything below 512 pixels won't work well for - [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) - and checkpoints that are not specifically fine-tuned on low resolutions. - width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. Anything below 512 pixels won't work well for - [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) - and checkpoints that are not specifically fine-tuned on low resolutions. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 5.0): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - negative_prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` - and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. 
- latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not - provided, text embeddings are generated from the `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If - not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. - pooled_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If - not provided, pooled text embeddings are generated from `prompt` input argument. - negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt - weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in - [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). - controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): - The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added - to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set - the corresponding scale as a list. - guess_mode (`bool`, *optional*, defaults to `False`): - The ControlNet encoder tries to recognize the content of the input image even if you remove all - prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. - control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): - The percentage of total steps at which the ControlNet starts applying. - control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): - The percentage of total steps at which the ControlNet stops applying. - original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): - If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. - `original_size` defaults to `(width, height)` if not specified. 
Part of SDXL's micro-conditioning as - explained in section 2.2 of - [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). - crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): - `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position - `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting - `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of - [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). - target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): - For most cases, `target_size` should be set to the desired height and width of the generated image. If - not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in - section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). - negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): - To negatively condition the generation process based on a specific image resolution. Part of SDXL's - micro-conditioning as explained in section 2.2 of - [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more - information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. - negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): - To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's - micro-conditioning as explained in section 2.2 of - [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more - information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. - negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): - To negatively condition the generation process based on a target image resolution. It should be as same - as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of - [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more - information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. - clip_skip (`int`, *optional*): - Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that - the output of the pre-final layer will be used for computing the prompt embeddings. - - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, - otherwise a `tuple` is returned containing the output images. 
- """ - controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet - - # align format for control guidance - if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): - control_guidance_start = len(control_guidance_end) * [control_guidance_start] - elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): - control_guidance_end = len(control_guidance_start) * [control_guidance_end] - elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): - mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 - control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ - control_guidance_end - ] - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, - prompt_2, - image, - callback_steps, - negative_prompt, - negative_prompt_2, - prompt_embeds, - negative_prompt_embeds, - pooled_prompt_embeds, - negative_pooled_prompt_embeds, - controlnet_conditioning_scale, - control_guidance_start, - control_guidance_end, - ) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): - controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) - - global_pool_conditions = ( - controlnet.config.global_pool_conditions - if isinstance(controlnet, ControlNetModel) - else controlnet.nets[0].config.global_pool_conditions - ) - guess_mode = guess_mode or global_pool_conditions - - # 3. Encode input prompt - text_encoder_lora_scale = ( - cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None - ) - ( - prompt_embeds, - negative_prompt_embeds, - pooled_prompt_embeds, - negative_pooled_prompt_embeds, - ) = self.encode_prompt( - prompt, - prompt_2, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - negative_prompt_2, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - pooled_prompt_embeds=pooled_prompt_embeds, - negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, - lora_scale=text_encoder_lora_scale, - clip_skip=clip_skip, - ) - - # 4. 
Prepare image - if isinstance(controlnet, ControlNetModel): - image = self.prepare_image( - image=image, - width=width, - height=height, - batch_size=batch_size * num_images_per_prompt, - num_images_per_prompt=num_images_per_prompt, - device=device, - dtype=controlnet.dtype, - do_classifier_free_guidance=do_classifier_free_guidance, - guess_mode=guess_mode, - ) - height, width = image.shape[-2:] - elif isinstance(controlnet, MultiControlNetModel): - images = [] - - for image_ in image: - image_ = self.prepare_image( - image=image_, - width=width, - height=height, - batch_size=batch_size * num_images_per_prompt, - num_images_per_prompt=num_images_per_prompt, - device=device, - dtype=controlnet.dtype, - do_classifier_free_guidance=do_classifier_free_guidance, - guess_mode=guess_mode, - ) - - images.append(image_) - - image = images - height, width = image[0].shape[-2:] - else: - assert False - - # 5. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 6. Prepare latent variables - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7.1 Create tensor stating which controlnets to keep - controlnet_keep = [] - for i in range(len(timesteps)): - keeps = [ - 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) - for s, e in zip(control_guidance_start, control_guidance_end) - ] - controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) - - # 7.2 Prepare added time ids & embeddings - if isinstance(image, list): - original_size = original_size or image[0].shape[-2:] - else: - original_size = original_size or image.shape[-2:] - target_size = target_size or (height, width) - - add_text_embeds = pooled_prompt_embeds - add_time_ids = self._get_add_time_ids( - original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype - ) - - if negative_original_size is not None and negative_target_size is not None: - negative_add_time_ids = self._get_add_time_ids( - negative_original_size, - negative_crops_coords_top_left, - negative_target_size, - dtype=prompt_embeds.dtype, - ) - else: - negative_add_time_ids = add_time_ids - - if do_classifier_free_guidance: - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) - add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) - add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) - - prompt_embeds = prompt_embeds.to(device) - add_text_embeds = add_text_embeds.to(device) - add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) - - # 8. 
Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} - - # controlnet(s) inference - if guess_mode and do_classifier_free_guidance: - # Infer ControlNet only for the conditional batch. - control_model_input = latents - control_model_input = self.scheduler.scale_model_input(control_model_input, t) - controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] - controlnet_added_cond_kwargs = { - "text_embeds": add_text_embeds.chunk(2)[1], - "time_ids": add_time_ids.chunk(2)[1], - } - else: - control_model_input = latent_model_input - controlnet_prompt_embeds = prompt_embeds - controlnet_added_cond_kwargs = added_cond_kwargs - - if isinstance(controlnet_keep[i], list): - cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] - else: - controlnet_cond_scale = controlnet_conditioning_scale - if isinstance(controlnet_cond_scale, list): - controlnet_cond_scale = controlnet_cond_scale[0] - cond_scale = controlnet_cond_scale * controlnet_keep[i] - - down_block_res_samples, mid_block_res_sample = self.controlnet( - control_model_input, - t, - encoder_hidden_states=controlnet_prompt_embeds, - controlnet_cond=image, - conditioning_scale=cond_scale, - guess_mode=guess_mode, - added_cond_kwargs=controlnet_added_cond_kwargs, - return_dict=False, - ) - - if guess_mode and do_classifier_free_guidance: - # Infered ControlNet only for the conditional batch. - # To apply the output of ControlNet to both the unconditional and conditional batches, - # add 0 to the unconditional batch to keep it unchanged. 
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] - mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - down_block_additional_residuals=down_block_res_samples, - mid_block_additional_residual=mid_block_res_sample, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # manually for max memory savings - if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: - self.upcast_vae() - latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) - - if not output_type == "latent": - # make sure the VAE is in float32 mode, as it overflows in float16 - needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast - - if needs_upcasting: - self.upcast_vae() - latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) - - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - - # cast back to fp16 if needed - if needs_upcasting: - self.vae.to(dtype=torch.float16) - else: - image = latents - - if not output_type == "latent": - # apply watermark if available - if self.watermark is not None: - image = self.watermark.apply_watermark(image) - - image = self.image_processor.postprocess(image, output_type=output_type) - - # Offload all models - self.maybe_free_model_hooks() - - if not return_dict: - return (image,) - - return StableDiffusionXLPipelineOutput(images=image) diff --git a/spaces/peterwisu/lip_synthesis/src/models/attnlstm.py b/spaces/peterwisu/lip_synthesis/src/models/attnlstm.py deleted file mode 100644 index 4b877523a57045711fa6d82548a8b1b88bcc4e8f..0000000000000000000000000000000000000000 --- a/spaces/peterwisu/lip_synthesis/src/models/attnlstm.py +++ /dev/null @@ -1,237 +0,0 @@ -import torch -import torch.nn as nn -import math - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -class LstmGen(nn.Module): - - def __init__ (self): - super(LstmGen, self).__init__() - - self.n_values = 60 - - self.au_encoder = nn.Sequential( #input (1,80,18) - ConvBlock(1, 64, kernel_size=(3,3),stride=1, padding=0), - ResidualBlock(64,64, kernel_size=(3,3),stride=1, padding=1), - - ConvBlock(64,128, kernel_size=(5,3), stride=(3,1), padding=1), - ResidualBlock(128,128, kernel_size=(3,3), stride=(1,1), padding=1), - - ConvBlock(128,256, kernel_size=(5,3), stride=(3,3), padding=0), - ResidualBlock(256,256, kernel_size=(3,3), stride=(1,1), padding=1), - - ConvBlock(256,256, kernel_size=(3,3), stride=(3,3), padding=1), - ResidualBlock(256,256, kernel_size=(3,3), stride=(1,1), padding=1), - - ConvBlock(256,256, kernel_size=(3,2), stride=(1,1), padding=0), - - nn.Flatten() - ) - - - self.lstm_encoder = 
Encoder(input_size=256, hidden_size=256, num_layers=4, dropout=0.25, bidirectional=True, batch_first=False) - - self.pe = PositionalEncoding(d_model=512, dropout=0.1, max_len=5) - - self.self_attn = nn.MultiheadAttention(512, num_heads=8) - - self.feed = nn.Sequential( - LinearBlock(512+self.n_values, 256), - LinearBlock(256, 128), - LinearBlock(128, 60 , dropout=False, batchnorm=False, activation=False), - ) - - - def forward(self, au, lip): - - # AU input shape : (B, Seq, 1, 80 , 18) - - # inputs shape ( B , seq, 20 , 3) - lip = lip.reshape(lip.size(0),lip.size(1),-1) # outshape(Seq, B , 60) - - # list for seq of extract features - in_feats = [] - # length of sequence - seq_len = au.size(1) - # batch_size - batch_size = au.size(0) - - au = au.reshape(batch_size * seq_len , 1 , 80 , -1) # (Batchsize * seq , 1 , 80 (num mel) , segments ) - - in_feats = self.au_encoder(au) - - in_feats = in_feats.reshape(seq_len,batch_size, -1) - - lstm_outs , hidden, cell = self.lstm_encoder(in_feats) - - pos_out = self.pe(lstm_outs) - - - attn_out = self.self_attn(pos_out,pos_out,pos_out)[0] - - - attn_out = attn_out.reshape(-1,attn_out.shape[-1]) - - lip = lip.reshape(-1,lip.shape[-1]) - - concat_input = torch.concat((attn_out,lip),dim=1) - - pred = self.feed(concat_input) - - pred = pred.reshape(batch_size, seq_len, self.n_values) - - - return pred , lip - - -class Encoder(nn.Module): - - def __init__ (self, input_size, hidden_size, num_layers, dropout,bidirectional=True,batch_first=False): - - super(Encoder,self).__init__() - - self.lstm = nn.LSTM( - input_size=input_size, - hidden_size=hidden_size, - num_layers=num_layers, - dropout=dropout, - bidirectional=bidirectional, - batch_first=batch_first - ) - - def forward(self, inputs): - - out, (hidden, cell) = self.lstm(inputs) - - return out , hidden , cell - - -class PositionalEncoding(nn.Module): - """" - Positional Encoding from Pytorch website - """ - - def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000): - super().__init__() - self.dropout = nn.Dropout(p=dropout) - - position = torch.arange(max_len).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)) - pe = torch.zeros(max_len, 1, d_model) - pe[:, 0, 0::2] = torch.sin(position * div_term) - pe[:, 0, 1::2] = torch.cos(position * div_term) - self.register_buffer('pe', pe) - - def forward(self, x): - """ - Args: - x: Tensor, shape [seq_len, batch_size, embedding_dim] - """ - x = x + self.pe[:x.size(0)] - return self.dropout(x) - - - -class LinearBlock(nn.Module): - """ - Custom Linear Layer block with regularization (Dropout and Batchnorm) and Activation function - """ - def __init__ (self, in_features, out_features, dropout=True, dropout_rate=0.2, batchnorm=True, activation=True): - - super().__init__() - - self.mlp = nn.Linear(in_features = in_features,out_features = out_features) # Linear Layer - self.activation = nn.LeakyReLU(0.2) # activation function layer - self.batchnorm = nn.BatchNorm1d(out_features) # Batch Normalization 1D layer - self.do_dropout = dropout # perform dropout - self.do_batchnorm = batchnorm # perform batchnorm - self.do_activation = activation # perform activation - self.dropout = nn.Dropout(dropout_rate) # Dropout rate - - def forward(self, x): - """ - forward propagation of this layer - """ - - outs = self.mlp(x) - - - if self.do_batchnorm: - - outs = self.batchnorm(outs) - - if self.do_activation: - - outs = self.activation(outs) - - if self.do_dropout: - - outs = self.dropout(outs) - - return outs - - - 
-class ConvBlock(nn.Module): - """ - Convolutional Layer (With batchnorm and activation) - """ - def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0): - - super().__init__() - - self.conv_layer = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding), - nn.BatchNorm2d(out_channels), - ) - - self.activation = nn.ReLU() - - def forward(self, inputs): - - cnn_out = self.conv_layer(inputs) - cnn_out = self.activation(cnn_out) - - return cnn_out - -class ResidualBlock(nn.Module): - """ - Convolutional Layers with Residual connection - """ - def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0): - - super().__init__() - - self.conv_layer1 = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding), - nn.BatchNorm2d(out_channels), - ) - - - self.conv_layer2 = nn.Sequential(nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding), - nn.BatchNorm2d(out_channels), - ) - - self.activation = nn.ReLU() - - - def forward(self,x): - - residual = x - # first conv layer - out = self.activation(self.conv_layer1(x)) - # second conv layer - out = self.activation(self.conv_layer2(out)) - # residual connection - out = out + residual - - return out - - - - - - - - - - diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/__init__.py b/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/__init__.py deleted file mode 100644 index d77d75f0be08138e2556d40d83ee650e921379f9..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/__init__.py +++ /dev/null @@ -1,123 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -utils/initialization -""" - - -# def notebook_init(verbose=True): -# # Check system software and hardware -# print('Checking setup...') - -# import os -# import shutil - -# from utils.general import check_requirements, emojis, is_colab -# from utils.torch_utils import select_device # imports - -# check_requirements(('psutil', 'IPython')) -# import psutil -# from IPython import display # to display images and clear console output - -# if is_colab(): -# shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory - -# # System info -# if verbose: -# gb = 1 << 30 # bytes to GiB (1024 ** 3) -# ram = psutil.virtual_memory().total -# total, used, free = shutil.disk_usage("/") -# display.clear_output() -# s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' -# else: -# s = '' - -# select_device(newline=False) -# print(emojis(f'Setup complete ✅ {s}')) -# return display - -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -utils/initialization -""" - -import contextlib -import platform -import threading - - -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - - -class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class. 
Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg=''): - self.msg = msg - - def __enter__(self): - pass - - def __exit__(self, exc_type, value, traceback): - if value: - print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) - return True - - -def threaded(func): - # Multi-threads a target function and returns thread. Usage: @threaded decorator - def wrapper(*args, **kwargs): - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - -def join_threads(verbose=False): - # Join all daemon threads, i.e. atexit.register(lambda: join_threads()) - main_thread = threading.current_thread() - for t in threading.enumerate(): - if t is not main_thread: - if verbose: - print(f'Joining thread {t.name}') - t.join() - - -def notebook_init(verbose=True): - # Check system software and hardware - print('Checking setup...') - - import os - import shutil - - from ultralytics.utils.checks import check_requirements - - from utils.general import check_font, is_colab - from utils.torch_utils import select_device # imports - - check_font() - - import psutil - - if check_requirements('wandb', install=False): - os.system('pip uninstall -y wandb') # eliminate unexpected account creation prompt with infinite hang - if is_colab(): - shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory - - # System info - display = None - if verbose: - gb = 1 << 30 # bytes to GiB (1024 ** 3) - ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage('/') - with contextlib.suppress(Exception): # clear display if ipython is installed - from IPython import display - display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' - else: - s = '' - - select_device(newline=False) - print(emojis(f'Setup complete ✅ {s}')) - return display diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/__init__.py deleted file mode 100644 index b22f7abb93b9d7aeee50829b35746aaa3f9f5feb..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/__init__.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -pip._vendor is for vendoring dependencies of pip to prevent needing pip to -depend on something external. - -Files inside of pip._vendor should be considered immutable and should only be -updated to versions from upstream. -""" -from __future__ import absolute_import - -import glob -import os.path -import sys - -# Downstream redistributors which have debundled our dependencies should also -# patch this value to be true. This will trigger the additional patching -# to cause things like "six" to be available as pip. -DEBUNDLED = False - -# By default, look in this directory for a bunch of .whl files which we will -# add to the beginning of sys.path before attempting to import anything. This -# is done to support downstream re-distributors like Debian and Fedora who -# wish to create their own Wheels for our dependencies to aid in debundling. -WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) - - -# Define a small helper function to alias our vendored modules to the real ones -# if the vendored ones do not exist. This idea of this was taken from -# https://github.com/kennethreitz/requests/pull/2567. 
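Before the helper itself (the vendored() function shown next in the diff), here is a minimal, self-contained sketch of the sys.modules aliasing idea the comment above describes: import a module once and expose it under a second, "vendored" dotted name so both import paths resolve to the same module object. The names alias_module and mypkg.vendor are hypothetical and only for illustration.

# Sketch only — standard-library calls, no pip internals assumed.
import importlib
import sys

def alias_module(real_name: str, vendored_name: str) -> None:
    """Make `import <vendored_name>` return the already-imported <real_name>."""
    module = importlib.import_module(real_name)        # e.g. "json"
    sys.modules[vendored_name] = module                 # register the full dotted alias
    base, _, attr = vendored_name.rpartition(".")
    if base:                                            # attach it as an attribute of the parent package
        setattr(importlib.import_module(base), attr, module)

# Usage (illustrative): alias_module("json", "mypkg.vendor.json") makes
# `mypkg.vendor.json` and `json` the same object, provided mypkg.vendor is importable.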
-def vendored(modulename): - vendored_name = "{0}.{1}".format(__name__, modulename) - - try: - __import__(modulename, globals(), locals(), level=0) - except ImportError: - # We can just silently allow import failures to pass here. If we - # got to this point it means that ``import pip._vendor.whatever`` - # failed and so did ``import whatever``. Since we're importing this - # upfront in an attempt to alias imports, not erroring here will - # just mean we get a regular import error whenever pip *actually* - # tries to import one of these modules to use it, which actually - # gives us a better error message than we would have otherwise - # gotten. - pass - else: - sys.modules[vendored_name] = sys.modules[modulename] - base, head = vendored_name.rsplit(".", 1) - setattr(sys.modules[base], head, sys.modules[modulename]) - - -# If we're operating in a debundled setup, then we want to go ahead and trigger -# the aliasing of our vendored libraries as well as looking for wheels to add -# to our sys.path. This will cause all of this code to be a no-op typically -# however downstream redistributors can enable it in a consistent way across -# all platforms. -if DEBUNDLED: - # Actually look inside of WHEEL_DIR to find .whl files and add them to the - # front of our sys.path. - sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path - - # Actually alias all of our vendored dependencies. - vendored("cachecontrol") - vendored("certifi") - vendored("colorama") - vendored("distlib") - vendored("distro") - vendored("six") - vendored("six.moves") - vendored("six.moves.urllib") - vendored("six.moves.urllib.parse") - vendored("packaging") - vendored("packaging.version") - vendored("packaging.specifiers") - vendored("pep517") - vendored("pkg_resources") - vendored("platformdirs") - vendored("progress") - vendored("requests") - vendored("requests.exceptions") - vendored("requests.packages") - vendored("requests.packages.urllib3") - vendored("requests.packages.urllib3._collections") - vendored("requests.packages.urllib3.connection") - vendored("requests.packages.urllib3.connectionpool") - vendored("requests.packages.urllib3.contrib") - vendored("requests.packages.urllib3.contrib.ntlmpool") - vendored("requests.packages.urllib3.contrib.pyopenssl") - vendored("requests.packages.urllib3.exceptions") - vendored("requests.packages.urllib3.fields") - vendored("requests.packages.urllib3.filepost") - vendored("requests.packages.urllib3.packages") - vendored("requests.packages.urllib3.packages.ordered_dict") - vendored("requests.packages.urllib3.packages.six") - vendored("requests.packages.urllib3.packages.ssl_match_hostname") - vendored("requests.packages.urllib3.packages.ssl_match_hostname." 
- "_implementation") - vendored("requests.packages.urllib3.poolmanager") - vendored("requests.packages.urllib3.request") - vendored("requests.packages.urllib3.response") - vendored("requests.packages.urllib3.util") - vendored("requests.packages.urllib3.util.connection") - vendored("requests.packages.urllib3.util.request") - vendored("requests.packages.urllib3.util.response") - vendored("requests.packages.urllib3.util.retry") - vendored("requests.packages.urllib3.util.ssl_") - vendored("requests.packages.urllib3.util.timeout") - vendored("requests.packages.urllib3.util.url") - vendored("resolvelib") - vendored("rich") - vendored("rich.console") - vendored("rich.highlighter") - vendored("rich.logging") - vendored("rich.markup") - vendored("rich.progress") - vendored("rich.segment") - vendored("rich.style") - vendored("rich.text") - vendored("rich.traceback") - vendored("tenacity") - vendored("tomli") - vendored("urllib3") diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__version__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__version__.py deleted file mode 100644 index 5063c3f8ee7980493efcc30c24f7e7582714aa81..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__version__.py +++ /dev/null @@ -1,14 +0,0 @@ -# .-. .-. .-. . . .-. .-. .-. .-. -# |( |- |.| | | |- `-. | `-. -# ' ' `-' `-`.`-' `-' `-' ' `-' - -__title__ = "requests" -__description__ = "Python HTTP for Humans." -__url__ = "https://requests.readthedocs.io" -__version__ = "2.31.0" -__build__ = 0x023100 -__author__ = "Kenneth Reitz" -__author_email__ = "me@kennethreitz.org" -__license__ = "Apache 2.0" -__copyright__ = "Copyright Kenneth Reitz" -__cake__ = "\u2728 \U0001f370 \u2728" diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/color.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/color.py deleted file mode 100644 index dfe455937c86b5b7cc83f5506ae0f7010bece1b1..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/color.py +++ /dev/null @@ -1,622 +0,0 @@ -import platform -import re -from colorsys import rgb_to_hls -from enum import IntEnum -from functools import lru_cache -from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple - -from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE -from .color_triplet import ColorTriplet -from .repr import Result, rich_repr -from .terminal_theme import DEFAULT_TERMINAL_THEME - -if TYPE_CHECKING: # pragma: no cover - from .terminal_theme import TerminalTheme - from .text import Text - - -WINDOWS = platform.system() == "Windows" - - -class ColorSystem(IntEnum): - """One of the 3 color system supported by terminals.""" - - STANDARD = 1 - EIGHT_BIT = 2 - TRUECOLOR = 3 - WINDOWS = 4 - - def __repr__(self) -> str: - return f"ColorSystem.{self.name}" - - def __str__(self) -> str: - return repr(self) - - -class ColorType(IntEnum): - """Type of color stored in Color class.""" - - DEFAULT = 0 - STANDARD = 1 - EIGHT_BIT = 2 - TRUECOLOR = 3 - WINDOWS = 4 - - def __repr__(self) -> str: - return f"ColorType.{self.name}" - - -ANSI_COLOR_NAMES = { - "black": 0, - "red": 1, - "green": 2, - "yellow": 3, - "blue": 4, - "magenta": 5, - "cyan": 6, - "white": 7, - "bright_black": 8, - "bright_red": 9, - "bright_green": 10, - "bright_yellow": 11, - "bright_blue": 12, - 
"bright_magenta": 13, - "bright_cyan": 14, - "bright_white": 15, - "grey0": 16, - "gray0": 16, - "navy_blue": 17, - "dark_blue": 18, - "blue3": 20, - "blue1": 21, - "dark_green": 22, - "deep_sky_blue4": 25, - "dodger_blue3": 26, - "dodger_blue2": 27, - "green4": 28, - "spring_green4": 29, - "turquoise4": 30, - "deep_sky_blue3": 32, - "dodger_blue1": 33, - "green3": 40, - "spring_green3": 41, - "dark_cyan": 36, - "light_sea_green": 37, - "deep_sky_blue2": 38, - "deep_sky_blue1": 39, - "spring_green2": 47, - "cyan3": 43, - "dark_turquoise": 44, - "turquoise2": 45, - "green1": 46, - "spring_green1": 48, - "medium_spring_green": 49, - "cyan2": 50, - "cyan1": 51, - "dark_red": 88, - "deep_pink4": 125, - "purple4": 55, - "purple3": 56, - "blue_violet": 57, - "orange4": 94, - "grey37": 59, - "gray37": 59, - "medium_purple4": 60, - "slate_blue3": 62, - "royal_blue1": 63, - "chartreuse4": 64, - "dark_sea_green4": 71, - "pale_turquoise4": 66, - "steel_blue": 67, - "steel_blue3": 68, - "cornflower_blue": 69, - "chartreuse3": 76, - "cadet_blue": 73, - "sky_blue3": 74, - "steel_blue1": 81, - "pale_green3": 114, - "sea_green3": 78, - "aquamarine3": 79, - "medium_turquoise": 80, - "chartreuse2": 112, - "sea_green2": 83, - "sea_green1": 85, - "aquamarine1": 122, - "dark_slate_gray2": 87, - "dark_magenta": 91, - "dark_violet": 128, - "purple": 129, - "light_pink4": 95, - "plum4": 96, - "medium_purple3": 98, - "slate_blue1": 99, - "yellow4": 106, - "wheat4": 101, - "grey53": 102, - "gray53": 102, - "light_slate_grey": 103, - "light_slate_gray": 103, - "medium_purple": 104, - "light_slate_blue": 105, - "dark_olive_green3": 149, - "dark_sea_green": 108, - "light_sky_blue3": 110, - "sky_blue2": 111, - "dark_sea_green3": 150, - "dark_slate_gray3": 116, - "sky_blue1": 117, - "chartreuse1": 118, - "light_green": 120, - "pale_green1": 156, - "dark_slate_gray1": 123, - "red3": 160, - "medium_violet_red": 126, - "magenta3": 164, - "dark_orange3": 166, - "indian_red": 167, - "hot_pink3": 168, - "medium_orchid3": 133, - "medium_orchid": 134, - "medium_purple2": 140, - "dark_goldenrod": 136, - "light_salmon3": 173, - "rosy_brown": 138, - "grey63": 139, - "gray63": 139, - "medium_purple1": 141, - "gold3": 178, - "dark_khaki": 143, - "navajo_white3": 144, - "grey69": 145, - "gray69": 145, - "light_steel_blue3": 146, - "light_steel_blue": 147, - "yellow3": 184, - "dark_sea_green2": 157, - "light_cyan3": 152, - "light_sky_blue1": 153, - "green_yellow": 154, - "dark_olive_green2": 155, - "dark_sea_green1": 193, - "pale_turquoise1": 159, - "deep_pink3": 162, - "magenta2": 200, - "hot_pink2": 169, - "orchid": 170, - "medium_orchid1": 207, - "orange3": 172, - "light_pink3": 174, - "pink3": 175, - "plum3": 176, - "violet": 177, - "light_goldenrod3": 179, - "tan": 180, - "misty_rose3": 181, - "thistle3": 182, - "plum2": 183, - "khaki3": 185, - "light_goldenrod2": 222, - "light_yellow3": 187, - "grey84": 188, - "gray84": 188, - "light_steel_blue1": 189, - "yellow2": 190, - "dark_olive_green1": 192, - "honeydew2": 194, - "light_cyan1": 195, - "red1": 196, - "deep_pink2": 197, - "deep_pink1": 199, - "magenta1": 201, - "orange_red1": 202, - "indian_red1": 204, - "hot_pink": 206, - "dark_orange": 208, - "salmon1": 209, - "light_coral": 210, - "pale_violet_red1": 211, - "orchid2": 212, - "orchid1": 213, - "orange1": 214, - "sandy_brown": 215, - "light_salmon1": 216, - "light_pink1": 217, - "pink1": 218, - "plum1": 219, - "gold1": 220, - "navajo_white1": 223, - "misty_rose1": 224, - "thistle1": 225, - "yellow1": 226, - 
"light_goldenrod1": 227, - "khaki1": 228, - "wheat1": 229, - "cornsilk1": 230, - "grey100": 231, - "gray100": 231, - "grey3": 232, - "gray3": 232, - "grey7": 233, - "gray7": 233, - "grey11": 234, - "gray11": 234, - "grey15": 235, - "gray15": 235, - "grey19": 236, - "gray19": 236, - "grey23": 237, - "gray23": 237, - "grey27": 238, - "gray27": 238, - "grey30": 239, - "gray30": 239, - "grey35": 240, - "gray35": 240, - "grey39": 241, - "gray39": 241, - "grey42": 242, - "gray42": 242, - "grey46": 243, - "gray46": 243, - "grey50": 244, - "gray50": 244, - "grey54": 245, - "gray54": 245, - "grey58": 246, - "gray58": 246, - "grey62": 247, - "gray62": 247, - "grey66": 248, - "gray66": 248, - "grey70": 249, - "gray70": 249, - "grey74": 250, - "gray74": 250, - "grey78": 251, - "gray78": 251, - "grey82": 252, - "gray82": 252, - "grey85": 253, - "gray85": 253, - "grey89": 254, - "gray89": 254, - "grey93": 255, - "gray93": 255, -} - - -class ColorParseError(Exception): - """The color could not be parsed.""" - - -RE_COLOR = re.compile( - r"""^ -\#([0-9a-f]{6})$| -color\(([0-9]{1,3})\)$| -rgb\(([\d\s,]+)\)$ -""", - re.VERBOSE, -) - - -@rich_repr -class Color(NamedTuple): - """Terminal color definition.""" - - name: str - """The name of the color (typically the input to Color.parse).""" - type: ColorType - """The type of the color.""" - number: Optional[int] = None - """The color number, if a standard color, or None.""" - triplet: Optional[ColorTriplet] = None - """A triplet of color components, if an RGB color.""" - - def __rich__(self) -> "Text": - """Displays the actual color if Rich printed.""" - from .style import Style - from .text import Text - - return Text.assemble( - f"", - ) - - def __rich_repr__(self) -> Result: - yield self.name - yield self.type - yield "number", self.number, None - yield "triplet", self.triplet, None - - @property - def system(self) -> ColorSystem: - """Get the native color system for this color.""" - if self.type == ColorType.DEFAULT: - return ColorSystem.STANDARD - return ColorSystem(int(self.type)) - - @property - def is_system_defined(self) -> bool: - """Check if the color is ultimately defined by the system.""" - return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR) - - @property - def is_default(self) -> bool: - """Check if the color is a default color.""" - return self.type == ColorType.DEFAULT - - def get_truecolor( - self, theme: Optional["TerminalTheme"] = None, foreground: bool = True - ) -> ColorTriplet: - """Get an equivalent color triplet for this color. - - Args: - theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None. - foreground (bool, optional): True for a foreground color, or False for background. Defaults to True. - - Returns: - ColorTriplet: A color triplet containing RGB components. 
- """ - - if theme is None: - theme = DEFAULT_TERMINAL_THEME - if self.type == ColorType.TRUECOLOR: - assert self.triplet is not None - return self.triplet - elif self.type == ColorType.EIGHT_BIT: - assert self.number is not None - return EIGHT_BIT_PALETTE[self.number] - elif self.type == ColorType.STANDARD: - assert self.number is not None - return theme.ansi_colors[self.number] - elif self.type == ColorType.WINDOWS: - assert self.number is not None - return WINDOWS_PALETTE[self.number] - else: # self.type == ColorType.DEFAULT: - assert self.number is None - return theme.foreground_color if foreground else theme.background_color - - @classmethod - def from_ansi(cls, number: int) -> "Color": - """Create a Color number from it's 8-bit ansi number. - - Args: - number (int): A number between 0-255 inclusive. - - Returns: - Color: A new Color instance. - """ - return cls( - name=f"color({number})", - type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT), - number=number, - ) - - @classmethod - def from_triplet(cls, triplet: "ColorTriplet") -> "Color": - """Create a truecolor RGB color from a triplet of values. - - Args: - triplet (ColorTriplet): A color triplet containing red, green and blue components. - - Returns: - Color: A new color object. - """ - return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet) - - @classmethod - def from_rgb(cls, red: float, green: float, blue: float) -> "Color": - """Create a truecolor from three color components in the range(0->255). - - Args: - red (float): Red component in range 0-255. - green (float): Green component in range 0-255. - blue (float): Blue component in range 0-255. - - Returns: - Color: A new color object. - """ - return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue))) - - @classmethod - def default(cls) -> "Color": - """Get a Color instance representing the default color. - - Returns: - Color: Default color. 
- """ - return cls(name="default", type=ColorType.DEFAULT) - - @classmethod - @lru_cache(maxsize=1024) - def parse(cls, color: str) -> "Color": - """Parse a color definition.""" - original_color = color - color = color.lower().strip() - - if color == "default": - return cls(color, type=ColorType.DEFAULT) - - color_number = ANSI_COLOR_NAMES.get(color) - if color_number is not None: - return cls( - color, - type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT), - number=color_number, - ) - - color_match = RE_COLOR.match(color) - if color_match is None: - raise ColorParseError(f"{original_color!r} is not a valid color") - - color_24, color_8, color_rgb = color_match.groups() - if color_24: - triplet = ColorTriplet( - int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16) - ) - return cls(color, ColorType.TRUECOLOR, triplet=triplet) - - elif color_8: - number = int(color_8) - if number > 255: - raise ColorParseError(f"color number must be <= 255 in {color!r}") - return cls( - color, - type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT), - number=number, - ) - - else: # color_rgb: - components = color_rgb.split(",") - if len(components) != 3: - raise ColorParseError( - f"expected three components in {original_color!r}" - ) - red, green, blue = components - triplet = ColorTriplet(int(red), int(green), int(blue)) - if not all(component <= 255 for component in triplet): - raise ColorParseError( - f"color components must be <= 255 in {original_color!r}" - ) - return cls(color, ColorType.TRUECOLOR, triplet=triplet) - - @lru_cache(maxsize=1024) - def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]: - """Get the ANSI escape codes for this color.""" - _type = self.type - if _type == ColorType.DEFAULT: - return ("39" if foreground else "49",) - - elif _type == ColorType.WINDOWS: - number = self.number - assert number is not None - fore, back = (30, 40) if number < 8 else (82, 92) - return (str(fore + number if foreground else back + number),) - - elif _type == ColorType.STANDARD: - number = self.number - assert number is not None - fore, back = (30, 40) if number < 8 else (82, 92) - return (str(fore + number if foreground else back + number),) - - elif _type == ColorType.EIGHT_BIT: - assert self.number is not None - return ("38" if foreground else "48", "5", str(self.number)) - - else: # self.standard == ColorStandard.TRUECOLOR: - assert self.triplet is not None - red, green, blue = self.triplet - return ("38" if foreground else "48", "2", str(red), str(green), str(blue)) - - @lru_cache(maxsize=1024) - def downgrade(self, system: ColorSystem) -> "Color": - """Downgrade a color system to a system with fewer colors.""" - - if self.type in (ColorType.DEFAULT, system): - return self - # Convert to 8-bit color from truecolor color - if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR: - assert self.triplet is not None - _h, l, s = rgb_to_hls(*self.triplet.normalized) - # If saturation is under 15% assume it is grayscale - if s < 0.15: - gray = round(l * 25.0) - if gray == 0: - color_number = 16 - elif gray == 25: - color_number = 231 - else: - color_number = 231 + gray - return Color(self.name, ColorType.EIGHT_BIT, number=color_number) - - red, green, blue = self.triplet - six_red = red / 95 if red < 95 else 1 + (red - 95) / 40 - six_green = green / 95 if green < 95 else 1 + (green - 95) / 40 - six_blue = blue / 95 if blue < 95 else 1 + (blue - 95) / 40 - - color_number = ( - 16 + 36 * round(six_red) + 6 * 
round(six_green) + round(six_blue) - ) - return Color(self.name, ColorType.EIGHT_BIT, number=color_number) - - # Convert to standard from truecolor or 8-bit - elif system == ColorSystem.STANDARD: - if self.system == ColorSystem.TRUECOLOR: - assert self.triplet is not None - triplet = self.triplet - else: # self.system == ColorSystem.EIGHT_BIT - assert self.number is not None - triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number]) - - color_number = STANDARD_PALETTE.match(triplet) - return Color(self.name, ColorType.STANDARD, number=color_number) - - elif system == ColorSystem.WINDOWS: - if self.system == ColorSystem.TRUECOLOR: - assert self.triplet is not None - triplet = self.triplet - else: # self.system == ColorSystem.EIGHT_BIT - assert self.number is not None - if self.number < 16: - return Color(self.name, ColorType.WINDOWS, number=self.number) - triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number]) - - color_number = WINDOWS_PALETTE.match(triplet) - return Color(self.name, ColorType.WINDOWS, number=color_number) - - return self - - -def parse_rgb_hex(hex_color: str) -> ColorTriplet: - """Parse six hex characters in to RGB triplet.""" - assert len(hex_color) == 6, "must be 6 characters" - color = ColorTriplet( - int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16) - ) - return color - - -def blend_rgb( - color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5 -) -> ColorTriplet: - """Blend one RGB color in to another.""" - r1, g1, b1 = color1 - r2, g2, b2 = color2 - new_color = ColorTriplet( - int(r1 + (r2 - r1) * cross_fade), - int(g1 + (g2 - g1) * cross_fade), - int(b1 + (b2 - b1) * cross_fade), - ) - return new_color - - -if __name__ == "__main__": # pragma: no cover - - from .console import Console - from .table import Table - from .text import Text - - console = Console() - - table = Table(show_footer=False, show_edge=True) - table.add_column("Color", width=10, overflow="ellipsis") - table.add_column("Number", justify="right", style="yellow") - table.add_column("Name", style="green") - table.add_column("Hex", style="blue") - table.add_column("RGB", style="magenta") - - colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items()) - for color_number, name in colors: - if "grey" in name: - continue - color_cell = Text(" " * 10, style=f"on {name}") - if color_number < 16: - table.add_row(color_cell, f"{color_number}", Text(f'"{name}"')) - else: - color = EIGHT_BIT_PALETTE[color_number] # type: ignore[has-type] - table.add_row( - color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb - ) - - console.print(table) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/default_styles.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/default_styles.py deleted file mode 100644 index dca37193abffab8b5b388018f895f197316ab652..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/default_styles.py +++ /dev/null @@ -1,190 +0,0 @@ -from typing import Dict - -from .style import Style - -DEFAULT_STYLES: Dict[str, Style] = { - "none": Style.null(), - "reset": Style( - color="default", - bgcolor="default", - dim=False, - bold=False, - italic=False, - underline=False, - blink=False, - blink2=False, - reverse=False, - conceal=False, - strike=False, - ), - "dim": Style(dim=True), - "bright": Style(dim=False), - "bold": Style(bold=True), - "strong": Style(bold=True), - "code": Style(reverse=True, 
bold=True), - "italic": Style(italic=True), - "emphasize": Style(italic=True), - "underline": Style(underline=True), - "blink": Style(blink=True), - "blink2": Style(blink2=True), - "reverse": Style(reverse=True), - "strike": Style(strike=True), - "black": Style(color="black"), - "red": Style(color="red"), - "green": Style(color="green"), - "yellow": Style(color="yellow"), - "magenta": Style(color="magenta"), - "cyan": Style(color="cyan"), - "white": Style(color="white"), - "inspect.attr": Style(color="yellow", italic=True), - "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True), - "inspect.callable": Style(bold=True, color="red"), - "inspect.async_def": Style(italic=True, color="bright_cyan"), - "inspect.def": Style(italic=True, color="bright_cyan"), - "inspect.class": Style(italic=True, color="bright_cyan"), - "inspect.error": Style(bold=True, color="red"), - "inspect.equals": Style(), - "inspect.help": Style(color="cyan"), - "inspect.doc": Style(dim=True), - "inspect.value.border": Style(color="green"), - "live.ellipsis": Style(bold=True, color="red"), - "layout.tree.row": Style(dim=False, color="red"), - "layout.tree.column": Style(dim=False, color="blue"), - "logging.keyword": Style(bold=True, color="yellow"), - "logging.level.notset": Style(dim=True), - "logging.level.debug": Style(color="green"), - "logging.level.info": Style(color="blue"), - "logging.level.warning": Style(color="red"), - "logging.level.error": Style(color="red", bold=True), - "logging.level.critical": Style(color="red", bold=True, reverse=True), - "log.level": Style.null(), - "log.time": Style(color="cyan", dim=True), - "log.message": Style.null(), - "log.path": Style(dim=True), - "repr.ellipsis": Style(color="yellow"), - "repr.indent": Style(color="green", dim=True), - "repr.error": Style(color="red", bold=True), - "repr.str": Style(color="green", italic=False, bold=False), - "repr.brace": Style(bold=True), - "repr.comma": Style(bold=True), - "repr.ipv4": Style(bold=True, color="bright_green"), - "repr.ipv6": Style(bold=True, color="bright_green"), - "repr.eui48": Style(bold=True, color="bright_green"), - "repr.eui64": Style(bold=True, color="bright_green"), - "repr.tag_start": Style(bold=True), - "repr.tag_name": Style(color="bright_magenta", bold=True), - "repr.tag_contents": Style(color="default"), - "repr.tag_end": Style(bold=True), - "repr.attrib_name": Style(color="yellow", italic=False), - "repr.attrib_equal": Style(bold=True), - "repr.attrib_value": Style(color="magenta", italic=False), - "repr.number": Style(color="cyan", bold=True, italic=False), - "repr.number_complex": Style(color="cyan", bold=True, italic=False), # same - "repr.bool_true": Style(color="bright_green", italic=True), - "repr.bool_false": Style(color="bright_red", italic=True), - "repr.none": Style(color="magenta", italic=True), - "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False), - "repr.uuid": Style(color="bright_yellow", bold=False), - "repr.call": Style(color="magenta", bold=True), - "repr.path": Style(color="magenta"), - "repr.filename": Style(color="bright_magenta"), - "rule.line": Style(color="bright_green"), - "rule.text": Style.null(), - "json.brace": Style(bold=True), - "json.bool_true": Style(color="bright_green", italic=True), - "json.bool_false": Style(color="bright_red", italic=True), - "json.null": Style(color="magenta", italic=True), - "json.number": Style(color="cyan", bold=True, italic=False), - "json.str": Style(color="green", italic=False, bold=False), - "json.key": 
Style(color="blue", bold=True), - "prompt": Style.null(), - "prompt.choices": Style(color="magenta", bold=True), - "prompt.default": Style(color="cyan", bold=True), - "prompt.invalid": Style(color="red"), - "prompt.invalid.choice": Style(color="red"), - "pretty": Style.null(), - "scope.border": Style(color="blue"), - "scope.key": Style(color="yellow", italic=True), - "scope.key.special": Style(color="yellow", italic=True, dim=True), - "scope.equals": Style(color="red"), - "table.header": Style(bold=True), - "table.footer": Style(bold=True), - "table.cell": Style.null(), - "table.title": Style(italic=True), - "table.caption": Style(italic=True, dim=True), - "traceback.error": Style(color="red", italic=True), - "traceback.border.syntax_error": Style(color="bright_red"), - "traceback.border": Style(color="red"), - "traceback.text": Style.null(), - "traceback.title": Style(color="red", bold=True), - "traceback.exc_type": Style(color="bright_red", bold=True), - "traceback.exc_value": Style.null(), - "traceback.offset": Style(color="bright_red", bold=True), - "bar.back": Style(color="grey23"), - "bar.complete": Style(color="rgb(249,38,114)"), - "bar.finished": Style(color="rgb(114,156,31)"), - "bar.pulse": Style(color="rgb(249,38,114)"), - "progress.description": Style.null(), - "progress.filesize": Style(color="green"), - "progress.filesize.total": Style(color="green"), - "progress.download": Style(color="green"), - "progress.elapsed": Style(color="yellow"), - "progress.percentage": Style(color="magenta"), - "progress.remaining": Style(color="cyan"), - "progress.data.speed": Style(color="red"), - "progress.spinner": Style(color="green"), - "status.spinner": Style(color="green"), - "tree": Style(), - "tree.line": Style(), - "markdown.paragraph": Style(), - "markdown.text": Style(), - "markdown.em": Style(italic=True), - "markdown.emph": Style(italic=True), # For commonmark backwards compatibility - "markdown.strong": Style(bold=True), - "markdown.code": Style(bold=True, color="cyan", bgcolor="black"), - "markdown.code_block": Style(color="cyan", bgcolor="black"), - "markdown.block_quote": Style(color="magenta"), - "markdown.list": Style(color="cyan"), - "markdown.item": Style(), - "markdown.item.bullet": Style(color="yellow", bold=True), - "markdown.item.number": Style(color="yellow", bold=True), - "markdown.hr": Style(color="yellow"), - "markdown.h1.border": Style(), - "markdown.h1": Style(bold=True), - "markdown.h2": Style(bold=True, underline=True), - "markdown.h3": Style(bold=True), - "markdown.h4": Style(bold=True, dim=True), - "markdown.h5": Style(underline=True), - "markdown.h6": Style(italic=True), - "markdown.h7": Style(italic=True, dim=True), - "markdown.link": Style(color="bright_blue"), - "markdown.link_url": Style(color="blue", underline=True), - "markdown.s": Style(strike=True), - "iso8601.date": Style(color="blue"), - "iso8601.time": Style(color="magenta"), - "iso8601.timezone": Style(color="yellow"), -} - - -if __name__ == "__main__": # pragma: no cover - import argparse - import io - - from pip._vendor.rich.console import Console - from pip._vendor.rich.table import Table - from pip._vendor.rich.text import Text - - parser = argparse.ArgumentParser() - parser.add_argument("--html", action="store_true", help="Export as HTML table") - args = parser.parse_args() - html: bool = args.html - console = Console(record=True, width=70, file=io.StringIO()) if html else Console() - - table = Table("Name", "Styling") - - for style_name, style in DEFAULT_STYLES.items(): - 
table.add_row(Text(style_name, style=style), str(style)) - - console.print(table) - if html: - print(console.export_html(inline_styles=True)) diff --git a/spaces/pleonova/multi-label-summary-text/utils.py b/spaces/pleonova/multi-label-summary-text/utils.py deleted file mode 100644 index 31a28479bdb21ab73b34e257d99fccf72027ed9c..0000000000000000000000000000000000000000 --- a/spaces/pleonova/multi-label-summary-text/utils.py +++ /dev/null @@ -1,85 +0,0 @@ -import streamlit as st -import numpy as np -import pandas as pd -# import plotly.express as px -# from plotly.subplots import make_subplots -import json - -# Reference: https://huggingface.co/spaces/team-zero-shot-nli/zero-shot-nli/blob/main/utils.py -# def plot_result(top_topics, scores): -# top_topics = np.array(top_topics) -# scores = np.array(scores) -# scores *= 100 -# fig = px.bar(x=np.around(scores,2), y=top_topics, orientation='h', -# labels={'x': 'Confidence Score', 'y': 'Label'}, -# text=scores, -# range_x=(0,115), -# title='Predictions', -# color=np.linspace(0,1,len(scores)), -# color_continuous_scale='GnBu') -# fig.update(layout_coloraxis_showscale=False) -# fig.update_traces(texttemplate='%{text:0.1f}%', textposition='outside') -# st.plotly_chart(fig) - - -# def plot_dual_bar_chart(topics_summary, scores_summary, topics_text, scores_text): -# data1 = pd.DataFrame({'label': topics_summary, 'scores on summary': scores_summary}) -# data2 = pd.DataFrame({'label': topics_text, 'scores on full text': scores_text}) -# data = pd.merge(data1, data2, on = ['label']) -# data.sort_values('scores on summary', ascending = True, inplace = True) - -# fig = make_subplots(rows=1, cols=2, -# subplot_titles=("Predictions on Summary", "Predictions on Full Text"), -# ) - -# fig1 = px.bar(x=round(data['scores on summary']*100, 2), y=data['label'], orientation='h', -# text=round(data['scores on summary']*100, 2), -# ) - -# fig2 = px.bar(x=round(data['scores on full text']*100,2), y=data['label'], orientation='h', -# text=round(data['scores on full text']*100,2), -# ) - -# fig.add_trace(fig1['data'][0], row=1, col=1) -# fig.add_trace(fig2['data'][0], row=1, col=2) - -# fig.update_traces(texttemplate='%{text:0.1f}%', textposition='outside') -# fig.update_layout(height=600, width=700) #, title_text="Predictions for") -# fig.update_xaxes(range=[0,115]) -# fig.update_xaxes(matches='x') -# fig.update_yaxes(showticklabels=False) # hide all the xticks -# fig.update_yaxes(showticklabels=True, row=1, col=1) - -# st.plotly_chart(fig) - -# def plot_dual_bar_chart(topics_summary, scores_summary, topics_text, scores_text): -# data1 = pd.DataFrame({'label': topics_summary, 'scores': scores_summary}) -# data1['classification_on'] = 'summary' -# data2 = pd.DataFrame({'label': topics_text, 'scores': scores_text}) -# data2['classification_on'] = 'full text' -# data = pd.concat([data1, data2]) -# data['scores'] = round(data['scores']*100,2) - -# fig = px.bar( -# data, x="scores", y="label", #orientation = 'h', -# labels={'x': 'Confidence Score', 'y': 'Label'}, -# text=data['scores'], -# range_x=(0,115), -# color="label", barmode="group", -# facet_col="classification_on", -# category_orders={"classification_on": ["summary", "full text"]} -# ) -# fig.update_traces(texttemplate='%{text:0.1f}%', textposition='outside') - -# st.plotly_chart(fig) - - -def examples_load(): - with open("examples.json") as f: - data=json.load(f) - return data['text'], data['long_text_license'], data['labels'], data['ground_labels'] - -def example_long_text_load(): - with 
open("example_long_text.txt", "r") as f: - text_data = f.read() - return text_data diff --git a/spaces/pog/Depression-Detector/README.md b/spaces/pog/Depression-Detector/README.md deleted file mode 100644 index e247372667caab2fcaa64d48b03f1b1098f7eff0..0000000000000000000000000000000000000000 --- a/spaces/pog/Depression-Detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Depression Detector -emoji: 🐠 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 2.8.14 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/CurImagePlugin.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/CurImagePlugin.py deleted file mode 100644 index 94efff3415679a5bf5b7038f9a1da15ebc6d04ca..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/CurImagePlugin.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Windows Cursor support for PIL -# -# notes: -# uses BmpImagePlugin.py to read the bitmap data. -# -# history: -# 96-05-27 fl Created -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# -from . import BmpImagePlugin, Image -from ._binary import i16le as i16 -from ._binary import i32le as i32 - -# -# -------------------------------------------------------------------- - - -def _accept(prefix): - return prefix[:4] == b"\0\0\2\0" - - -## -# Image plugin for Windows Cursor files. - - -class CurImageFile(BmpImagePlugin.BmpImageFile): - format = "CUR" - format_description = "Windows Cursor" - - def _open(self): - offset = self.fp.tell() - - # check magic - s = self.fp.read(6) - if not _accept(s): - msg = "not a CUR file" - raise SyntaxError(msg) - - # pick the largest cursor in the file - m = b"" - for i in range(i16(s, 4)): - s = self.fp.read(16) - if not m: - m = s - elif s[0] > m[0] and s[1] > m[1]: - m = s - if not m: - msg = "No cursors were found" - raise TypeError(msg) - - # load as bitmap - self._bitmap(i32(m, 12) + offset) - - # patch up the bitmap height - self._size = self.size[0], self.size[1] // 2 - d, e, o, a = self.tile[0] - self.tile[0] = d, (0, 0) + self.size, o, a - - return - - -# -# -------------------------------------------------------------------- - -Image.register_open(CurImageFile.format, CurImageFile, _accept) - -Image.register_extension(CurImageFile.format, ".cur") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-1c60e84e.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-1c60e84e.css deleted file mode 100644 index ca9a861e6b82d72715a9e5d72a7fb8d22f98a8b0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-1c60e84e.css +++ /dev/null @@ -1 +0,0 @@ -.wrap.svelte-3iwdd6{display:flex;flex-direction:column;width:100%}.head.svelte-3iwdd6{display:flex;justify-content:space-between}input[type=number].svelte-3iwdd6{display:block;position:relative;outline:none!important;box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--input-border-color);border-radius:var(--input-radius);background:var(--input-background-fill);padding:var(--size-2) 
var(--size-2);height:var(--size-6);color:var(--body-text-color);font-size:var(--input-text-size);line-height:var(--line-sm);text-align:center}input.svelte-3iwdd6:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1}input[type=number].svelte-3iwdd6:focus{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}input.svelte-3iwdd6::placeholder{color:var(--input-placeholder-color)}input[disabled].svelte-3iwdd6{cursor:not-allowed}input[type=range].svelte-3iwdd6{-webkit-appearance:none;appearance:none;width:100%;accent-color:var(--slider-color);height:4px;background:var(--neutral-200);border-radius:5px;background-image:linear-gradient(var(--slider-color),var(--slider-color));background-size:0% 100%;background-repeat:no-repeat}input[type=range].svelte-3iwdd6::-webkit-slider-thumb{-webkit-appearance:none;box-shadow:var(--input-shadow);border:solid .5px #ddd;height:20px;width:20px;border-radius:50%;background-color:#fff;cursor:pointer;margin-top:-2px;transition:background-color .1s ease}input[type=range].svelte-3iwdd6::-webkit-slider-thumb:hover{background:var(--neutral-50)}input[type=range].svelte-3iwdd6::-webkit-slider-runnable-track{-webkit-appearance:none;box-shadow:none;border:none;background:transparent;height:400%}input[type=range].svelte-3iwdd6::-moz-range-track{height:12px} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-ef54ac87.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-ef54ac87.js deleted file mode 100644 index 5a6b48044f5bba2fb9183fd5353a8c6d84f69839..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-ef54ac87.js +++ /dev/null @@ -1,14 +0,0 @@ -import{C as R,E as m,L as C,a as u}from"./index-b5ab13e3.js";import{s as z,t as e,y as n,h as W,L as I,i as E,w as Y,z as A,d as J,f as L,c as N,A as k,e as D,B,C as H,v as K,E as b,I as M,m as F,x as OO}from"./Index-9bf8add7.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";import"./Button-8eeccca1.js";import"./Index-c74a8b7c.js";import"./Copy-1b5c0932.js";import"./Download-696bd40c.js";import"./BlockLabel-e3970ebb.js";import"./Empty-eeaba2d1.js";import"./Example-e03fb3b4.js";const y=301,j=1,QO=2,d=302,eO=304,aO=305,iO=3,$O=4,tO=[9,10,11,12,13,32,133,160,5760,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8232,8233,8239,8287,12288],_=125,rO=59,x=47,SO=42,PO=43,nO=45,oO=new R({start:!1,shift(O,Q){return Q==iO||Q==$O||Q==eO?O:Q==aO},strict:!1}),ZO=new m((O,Q)=>{let{next:i}=O;(i==_||i==-1||Q.context)&&Q.canShift(d)&&O.acceptToken(d)},{contextual:!0,fallback:!0}),lO=new m((O,Q)=>{let{next:i}=O,a;tO.indexOf(i)>-1||i==x&&((a=O.peek(1))==x||a==SO)||i!=_&&i!=rO&&i!=-1&&!Q.context&&Q.canShift(y)&&O.acceptToken(y)},{contextual:!0}),XO=new m((O,Q)=>{let{next:i}=O;if((i==PO||i==nO)&&(O.advance(),i==O.next)){O.advance();let a=!Q.context&&Q.canShift(j);O.acceptToken(a?j:QO)}},{contextual:!0}),cO=z({"get set async static":e.modifier,"for while do if else switch try catch finally return throw break continue default case":e.controlKeyword,"in of await yield void typeof delete instanceof":e.operatorKeyword,"let var const function class extends":e.definitionKeyword,"import export from":e.moduleKeyword,"with debugger as 
new":e.keyword,TemplateString:e.special(e.string),super:e.atom,BooleanLiteral:e.bool,this:e.self,null:e.null,Star:e.modifier,VariableName:e.variableName,"CallExpression/VariableName TaggedTemplateExpression/VariableName":e.function(e.variableName),VariableDefinition:e.definition(e.variableName),Label:e.labelName,PropertyName:e.propertyName,PrivatePropertyName:e.special(e.propertyName),"CallExpression/MemberExpression/PropertyName":e.function(e.propertyName),"FunctionDeclaration/VariableDefinition":e.function(e.definition(e.variableName)),"ClassDeclaration/VariableDefinition":e.definition(e.className),PropertyDefinition:e.definition(e.propertyName),PrivatePropertyDefinition:e.definition(e.special(e.propertyName)),UpdateOp:e.updateOperator,LineComment:e.lineComment,BlockComment:e.blockComment,Number:e.number,String:e.string,Escape:e.escape,ArithOp:e.arithmeticOperator,LogicOp:e.logicOperator,BitOp:e.bitwiseOperator,CompareOp:e.compareOperator,RegExp:e.regexp,Equals:e.definitionOperator,Arrow:e.function(e.punctuation),": Spread":e.punctuation,"( )":e.paren,"[ ]":e.squareBracket,"{ }":e.brace,"InterpolationStart InterpolationEnd":e.special(e.brace),".":e.derefOperator,", ;":e.separator,"@":e.meta,TypeName:e.typeName,TypeDefinition:e.definition(e.typeName),"type enum interface implements namespace module declare":e.definitionKeyword,"abstract global Privacy readonly override":e.modifier,"is keyof unique infer":e.operatorKeyword,JSXAttributeValue:e.attributeValue,JSXText:e.content,"JSXStartTag JSXStartCloseTag JSXSelfCloseEndTag JSXEndTag":e.angleBracket,"JSXIdentifier JSXNameSpacedName":e.tagName,"JSXAttribute/JSXIdentifier JSXAttribute/JSXNameSpacedName":e.attributeName,"JSXBuiltin/JSXIdentifier":e.standard(e.tagName)}),sO={__proto__:null,export:14,as:19,from:27,default:30,async:35,function:36,extends:46,this:50,true:58,false:58,null:70,void:74,typeof:78,super:96,new:130,delete:146,yield:155,await:159,class:164,public:219,private:219,protected:219,readonly:221,instanceof:240,satisfies:243,in:244,const:246,import:278,keyof:333,unique:337,infer:343,is:379,abstract:399,implements:401,type:403,let:406,var:408,interface:415,enum:419,namespace:425,module:427,declare:431,global:435,for:456,of:465,while:468,with:472,do:476,if:480,else:482,switch:486,case:492,try:498,catch:502,finally:506,return:510,throw:514,break:518,continue:522,debugger:526},pO={__proto__:null,async:117,get:119,set:121,public:181,private:181,protected:181,static:183,abstract:185,override:187,readonly:193,accessor:195,new:383},gO={__proto__:null,"<":137},YO=C.deserialize({version:14,states:"$BhO`QUOOO%QQUOOO'TQWOOP(_OSOOO*mQ(CjO'#CfO*tOpO'#CgO+SO!bO'#CgO+bO07`O'#DZO-sQUO'#DaO.TQUO'#DlO%QQUO'#DvO0[QUO'#EOOOQ(CY'#EW'#EWO0rQSO'#ETOOQO'#I_'#I_O0zQSO'#GjOOQO'#Eh'#EhO1VQSO'#EgO1[QSO'#EgO3^Q(CjO'#JbO5}Q(CjO'#JcO6kQSO'#FVO6pQ#tO'#FnOOQ(CY'#F_'#F_O6{O&jO'#F_O7ZQ,UO'#FuO8qQSO'#FtOOQ(CY'#Jc'#JcOOQ(CW'#Jb'#JbOOQQ'#J|'#J|O8vQSO'#IOO8{Q(C[O'#IPOOQQ'#JO'#JOOOQQ'#IT'#ITQ`QUOOO%QQUO'#DnO9TQUO'#DzO%QQUO'#D|O9[QSO'#GjO9aQ,UO'#ClO9oQSO'#EfO9zQSO'#EqO:PQ,UO'#F^O:nQSO'#GjO:sQSO'#GnO;OQSO'#GnO;^QSO'#GqO;^QSO'#GrO;^QSO'#GtO9[QSO'#GwO;}QSO'#GzO=`QSO'#CbO=pQSO'#HXO=xQSO'#H_O=xQSO'#HaO`QUO'#HcO=xQSO'#HeO=xQSO'#HhO=}QSO'#HnO>SQ(C]O'#HtO%QQUO'#HvO>_Q(C]O'#HxO>jQ(C]O'#HzO8{Q(C[O'#H|O>uQ(CjO'#CfO?wQWO'#DfQOQSOOO@_QSO'#EPO9aQ,UO'#EfO@jQSO'#EfO@uQ`O'#F^OOQQ'#Cd'#CdOOQ(CW'#Dk'#DkOOQ(CW'#Jf'#JfO%QQUO'#JfOBOQWO'#E_OOQ(CW'#E^'#E^OBYQ(C`O'#E_OBtQWO'#ESOOQO'#Ji'#JiOCYQWO'#ESOCgQWO'#E_OC}QWO'#EeODQQWO'#E_O@}QWO'#E_OBtQWO'#E_PDkO?MpO'#C`POOO)CDm)CDmOOOO'#IU'#IUODvOpO,59ROOQ
(CY,59R,59ROOOO'#IV'#IVOEUO!bO,59RO%QQUO'#D]OOOO'#IX'#IXOEdO07`O,59uOOQ(CY,59u,59uOErQUO'#IYOFVQSO'#JdOHXQbO'#JdO+pQUO'#JdOH`QSO,59{OHvQSO'#EhOITQSO'#JqOI`QSO'#JpOI`QSO'#JpOIhQSO,5;UOImQSO'#JoOOQ(CY,5:W,5:WOItQUO,5:WOKuQ(CjO,5:bOLfQSO,5:jOLkQSO'#JmOMeQ(C[O'#JnO:sQSO'#JmOMlQSO'#JmOMtQSO,5;TOMyQSO'#JmOOQ(CY'#Cf'#CfO%QQUO'#EOONmQ`O,5:oOOQO'#Jj'#JjOOQO-E<]-E<]O9[QSO,5=UO! TQSO,5=UO! YQUO,5;RO!#]Q,UO'#EcO!$pQSO,5;RO!&YQ,UO'#DpO!&aQUO'#DuO!&kQWO,5;[O!&sQWO,5;[O%QQUO,5;[OOQQ'#E}'#E}OOQQ'#FP'#FPO%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]O%QQUO,5;]OOQQ'#FT'#FTO!'RQUO,5;nOOQ(CY,5;s,5;sOOQ(CY,5;t,5;tO!)UQSO,5;tOOQ(CY,5;u,5;uO%QQUO'#IeO!)^Q(C[O,5jOOQQ'#JW'#JWOOQQ,5>k,5>kOOQQ-EgQWO'#EkOOQ(CW'#Jo'#JoO!>nQ(C[O'#J}O8{Q(C[O,5=YO;^QSO,5=`OOQO'#Cr'#CrO!>yQWO,5=]O!?RQ,UO,5=^O!?^QSO,5=`O!?cQ`O,5=cO=}QSO'#G|O9[QSO'#HOO!?kQSO'#HOO9aQ,UO'#HRO!?pQSO'#HROOQQ,5=f,5=fO!?uQSO'#HSO!?}QSO'#ClO!@SQSO,58|O!@^QSO,58|O!BfQUO,58|OOQQ,58|,58|O!BsQ(C[O,58|O%QQUO,58|O!COQUO'#HZOOQQ'#H['#H[OOQQ'#H]'#H]O`QUO,5=sO!C`QSO,5=sO`QUO,5=yO`QUO,5={O!CeQSO,5=}O`QUO,5>PO!CjQSO,5>SO!CoQUO,5>YOOQQ,5>`,5>`O%QQUO,5>`O8{Q(C[O,5>bOOQQ,5>d,5>dO!GvQSO,5>dOOQQ,5>f,5>fO!GvQSO,5>fOOQQ,5>h,5>hO!G{QWO'#DXO%QQUO'#JfO!HjQWO'#JfO!IXQWO'#DgO!IjQWO'#DgO!K{QUO'#DgO!LSQSO'#JeO!L[QSO,5:QO!LaQSO'#ElO!LoQSO'#JrO!LwQSO,5;VO!L|QWO'#DgO!MZQWO'#EROOQ(CY,5:k,5:kO%QQUO,5:kO!MbQSO,5:kO=}QSO,5;QO!;xQWO,5;QO!tO+pQUO,5>tOOQO,5>z,5>zO#$vQUO'#IYOOQO-EtO$8XQSO1G5jO$8aQSO1G5vO$8iQbO1G5wO:sQSO,5>zO$8sQSO1G5sO$8sQSO1G5sO:sQSO1G5sO$8{Q(CjO1G5tO%QQUO1G5tO$9]Q(C[O1G5tO$9nQSO,5>|O:sQSO,5>|OOQO,5>|,5>|O$:SQSO,5>|OOQO-E<`-E<`OOQO1G0]1G0]OOQO1G0_1G0_O!)XQSO1G0_OOQQ7+([7+([O!#]Q,UO7+([O%QQUO7+([O$:bQSO7+([O$:mQ,UO7+([O$:{Q(CjO,59nO$=TQ(CjO,5UOOQQ,5>U,5>UO%QQUO'#HkO%&qQSO'#HmOOQQ,5>[,5>[O:sQSO,5>[OOQQ,5>^,5>^OOQQ7+)`7+)`OOQQ7+)f7+)fOOQQ7+)j7+)jOOQQ7+)l7+)lO%&vQWO1G5lO%'[Q$IUO1G0rO%'fQSO1G0rOOQO1G/m1G/mO%'qQ$IUO1G/mO=}QSO1G/mO!'RQUO'#DgOOQO,5>u,5>uOOQO-E{,5>{OOQO-E<_-E<_O!;xQWO1G/mOOQO-E<[-E<[OOQ(CY1G0X1G0XOOQ(CY7+%q7+%qO!MeQSO7+%qOOQ(CY7+&W7+&WO=}QSO7+&WO!;xQWO7+&WOOQO7+%t7+%tO$7kQ(CjO7+&POOQO7+&P7+&PO%QQUO7+&PO%'{Q(C[O7+&PO=}QSO7+%tO!;xQWO7+%tO%(WQ(C[O7+&POBtQWO7+%tO%(fQ(C[O7+&PO%(zQ(C`O7+&PO%)UQWO7+%tOBtQWO7+&PO%)cQWO7+&PO%)yQSO7++_O%)yQSO7++_O%*RQ(CjO7++`O%QQUO7++`OOQO1G4h1G4hO:sQSO1G4hO%*cQSO1G4hOOQO7+%y7+%yO!MeQSO<vOOQO-EwO%QQUO,5>wOOQO-ESQ$IUO1G0wO%>ZQ$IUO1G0wO%@RQ$IUO1G0wO%@fQ(CjO<VOOQQ,5>X,5>XO&#WQSO1G3vO:sQSO7+&^O!'RQUO7+&^OOQO7+%X7+%XO&#]Q$IUO1G5wO=}QSO7+%XOOQ(CY<zAN>zO%QQUOAN?VO=}QSOAN>zO&<^Q(C[OAN?VO!;xQWOAN>zO&zO&RO!V+iO^(qX'j(qX~O#W+mO'|%OO~Og+pO!X$yO'|%OO~O!X+rO~Oy+tO!XXO~O!t+yO~Ob,OO~O's#jO!W(sP~Ob%lO~O%a!OO's%|O~PRO!V,yO!W(fa~O!W2SO~P'TO^%^O#W2]O'j%^O~O^%^O!a#rO#W2]O'j%^O~O^%^O!a#rO!h%ZO!l2aO#W2]O'j%^O'|%OO(`'dO~O!]2bO!^2bO't!iO~PBtO![2eO!]2bO!^2bO#S2fO#T2fO't!iO~PBtO![2eO!]2bO!^2bO#P2gO#S2fO#T2fO't!iO~PBtO^%^O!a#rO!l2aO#W2]O'j%^O(`'dO~O^%^O'j%^O~P!3jO!V$^Oo$ja~O!S&|i!V&|i~P!3jO!V'xO!S(Wi~O!V(PO!S(di~O!S(ei!V(ei~P!3jO!V(]O!g(ai~O!V(bi!g(bi^(bi'j(bi~P!3jO#W2kO!V(bi!g(bi^(bi'j(bi~O|%vO!X%wO!x]O#a2nO#b2mO's%eO~O|%vO!X%wO#b2mO's%eO~Og2uO!X'QO%`2tO~Og2uO!X'QO%`2tO'|%OO~O#cvaPvaXva^vakva!eva!fva!hva!lva#fva#gva#hva#iva#jva#kva#lva#mva#nva#pva#rva#tva#uva'jva(Qva(`va!gva!Sva'hvaova!Xva%`va!ava~P#M{O#c$kaP$kaX$ka^$kak$kaz$ka!e$ka!f$ka!h$ka!l$ka#f$ka#g$ka#h$ka#i$ka#j$ka#k$ka#l$ka#m$ka#n$ka#p$ka#r$ka#t$ka#u$ka'j$ka(Q$ka(`$ka!g$ka!S$ka'h$kao$ka!X$ka%`$ka!a$ka~P#NqO#c$maP$maX$ma^$mak$maz$ma!e$ma!f$ma!h$ma!l$ma#f$ma#g$ma#h$ma#i$ma#j$ma#k$ma#l$ma#m$ma#n$ma#p$ma#r$ma#t$ma#u$ma'j$ma(Q$ma(`$ma!g$ma!S$ma'h$mao$ma!X$ma%`$ma!a$ma~P$ 
dO#c${aP${aX${a^${ak${az${a!V${a!e${a!f${a!h${a!l${a#f${a#g${a#h${a#i${a#j${a#k${a#l${a#m${a#n${a#p${a#r${a#t${a#u${a'j${a(Q${a(`${a!g${a!S${a'h${a#W${ao${a!X${a%`${a!a${a~P#(yO^#Zq!V#Zq'j#Zq'h#Zq!S#Zq!g#Zqo#Zq!X#Zq%`#Zq!a#Zq~P!3jOd'OX!V'OX~P!$uO!V._Od(Za~O!U2}O!V'PX!g'PX~P%QO!V.bO!g([a~O!V.bO!g([a~P!3jO!S3QO~O#x!ja!W!ja~PI{O#x!ba!V!ba!W!ba~P#?dO#x!na!W!na~P!6TO#x!pa!W!pa~P!8nO!X3dO$TfO$^3eO~O!W3iO~Oo3jO~P#(yO^$gq!V$gq'j$gq'h$gq!S$gq!g$gqo$gq!X$gq%`$gq!a$gq~P!3jO!S3kO~Ol.}O'uTO'xUO~Oy)sO|)tO(h)xOg%Wi(g%Wi!V%Wi#W%Wi~Od%Wi#x%Wi~P$HbOy)sO|)tOg%Yi(g%Yi(h%Yi!V%Yi#W%Yi~Od%Yi#x%Yi~P$ITO(`$WO~P#(yO!U3nO's%eO!V'YX!g'YX~O!V/VO!g(ma~O!V/VO!a#rO!g(ma~O!V/VO!a#rO(`'dO!g(ma~Od$ti!V$ti#W$ti#x$ti~P!-jO!U3vO's*UO!S'[X!V'[X~P!.XO!V/_O!S(na~O!V/_O!S(na~P#(yO!a#rO~O!a#rO#n4OO~Ok4RO!a#rO(`'dO~Od(Oi!V(Oi~P!-jO#W4UOd(Oi!V(Oi~P!-jO!g4XO~O^$hq!V$hq'j$hq'h$hq!S$hq!g$hqo$hq!X$hq%`$hq!a$hq~P!3jO!V4]O!X(oX~P#(yO!f#tO~P3zO!X$rX%TYX^$rX!V$rX'j$rX~P!,aO%T4_OghXyhX|hX!XhX(ghX(hhX^hX!VhX'jhX~O%T4_O~O%a4fO's+WO'uTO'xUO!V'eX!W'eX~O!V0_O!W(ua~OX4jO~O]4kO~O!S4oO~O^%^O'j%^O~P#(yO!X$yO~P#(yO!V4tO#W4vO!W(rX~O!W4wO~Ol!kO|4yO![5WO!]4}O!^4}O!x;oO!|5VO!}5UO#O5UO#P5TO#S5SO#T!wO't!iO'uTO'xUO(T!jO(_!nO~O!W5RO~P%#XOg5]O!X0zO%`5[O~Og5]O!X0zO%`5[O'|%OO~O's#jO!V'dX!W'dX~O!V1VO!W(sa~O'uTO'xUO(T5fO~O]5jO~O!g5mO~P%QO^5oO~O^5oO~P%QO#n5qO&Q5rO~PMPO_1mO!W5vO&`1lO~P`O!a5xO~O!a5zO!V(Yi!W(Yi!a(Yi!h(Yi'|(Yi~O!V#`i!W#`i~P#?dO#W5{O!V#`i!W#`i~O!V!Zi!W!Zi~P#?dO^%^O#W6UO'j%^O~O^%^O!a#rO#W6UO'j%^O~O^%^O!a#rO!l6ZO#W6UO'j%^O(`'dO~O!h%ZO'|%OO~P%(fO!]6[O!^6[O't!iO~PBtO![6_O!]6[O!^6[O#S6`O#T6`O't!iO~PBtO!V(]O!g(aq~O!V(bq!g(bq^(bq'j(bq~P!3jO|%vO!X%wO#b6dO's%eO~O!X'QO%`6gO~Og6jO!X'QO%`6gO~O#c%WiP%WiX%Wi^%Wik%Wiz%Wi!e%Wi!f%Wi!h%Wi!l%Wi#f%Wi#g%Wi#h%Wi#i%Wi#j%Wi#k%Wi#l%Wi#m%Wi#n%Wi#p%Wi#r%Wi#t%Wi#u%Wi'j%Wi(Q%Wi(`%Wi!g%Wi!S%Wi'h%Wio%Wi!X%Wi%`%Wi!a%Wi~P$HbO#c%YiP%YiX%Yi^%Yik%Yiz%Yi!e%Yi!f%Yi!h%Yi!l%Yi#f%Yi#g%Yi#h%Yi#i%Yi#j%Yi#k%Yi#l%Yi#m%Yi#n%Yi#p%Yi#r%Yi#t%Yi#u%Yi'j%Yi(Q%Yi(`%Yi!g%Yi!S%Yi'h%Yio%Yi!X%Yi%`%Yi!a%Yi~P$ITO#c$tiP$tiX$ti^$tik$tiz$ti!V$ti!e$ti!f$ti!h$ti!l$ti#f$ti#g$ti#h$ti#i$ti#j$ti#k$ti#l$ti#m$ti#n$ti#p$ti#r$ti#t$ti#u$ti'j$ti(Q$ti(`$ti!g$ti!S$ti'h$ti#W$tio$ti!X$ti%`$ti!a$ti~P#(yOd'Oa!V'Oa~P!-jO!V'Pa!g'Pa~P!3jO!V.bO!g([i~O#x#Zi!V#Zi!W#Zi~P#?dOP$YOy#vOz#wO|#xO!f#tO!h#uO!l$YO(QVOX#eik#ei!e#ei#g#ei#h#ei#i#ei#j#ei#k#ei#l#ei#m#ei#n#ei#p#ei#r#ei#t#ei#u#ei#x#ei(`#ei(g#ei(h#ei!V#ei!W#ei~O#f#ei~P%2xO#f;wO~P%2xOP$YOy#vOz#wO|#xO!f#tO!h#uO!l$YO#f;wO#g;xO#h;xO#i;xO(QVOX#ei!e#ei#j#ei#k#ei#l#ei#m#ei#n#ei#p#ei#r#ei#t#ei#u#ei#x#ei(`#ei(g#ei(h#ei!V#ei!W#ei~Ok#ei~P%5TOk;yO~P%5TOP$YOk;yOy#vOz#wO|#xO!f#tO!h#uO!l$YO#f;wO#g;xO#h;xO#i;xO#j;zO(QVO#p#ei#r#ei#t#ei#u#ei#x#ei(`#ei(g#ei(h#ei!V#ei!W#ei~OX#ei!e#ei#k#ei#l#ei#m#ei#n#ei~P%7`OXbO^#vy!V#vy'j#vy'h#vy!S#vy!g#vyo#vy!X#vy%`#vy!a#vy~P!3jOg=jOy)sO|)tO(g)vO(h)xO~OP#eiX#eik#eiz#ei!e#ei!f#ei!h#ei!l#ei#f#ei#g#ei#h#ei#i#ei#j#ei#k#ei#l#ei#m#ei#n#ei#p#ei#r#ei#t#ei#u#ei#x#ei(Q#ei(`#ei!V#ei!W#ei~P%AYO!f#tOP(PXX(PXg(PXk(PXy(PXz(PX|(PX!e(PX!h(PX!l(PX#f(PX#g(PX#h(PX#i(PX#j(PX#k(PX#l(PX#m(PX#n(PX#p(PX#r(PX#t(PX#u(PX#x(PX(Q(PX(`(PX(g(PX(h(PX!V(PX!W(PX~O#x#yi!V#yi!W#yi~P#?dO#x!ni!W!ni~P$!qO!W6vO~O!V'Xa!W'Xa~P#?dO!a#rO(`'dO!V'Ya!g'Ya~O!V/VO!g(mi~O!V/VO!a#rO!g(mi~Od$tq!V$tq#W$tq#x$tq~P!-jO!S'[a!V'[a~P#(yO!a6}O~O!V/_O!S(ni~P#(yO!V/_O!S(ni~O!S7RO~O!a#rO#n7WO~Ok7XO!a#rO(`'dO~O!S7ZO~Od$vq!V$vq#W$vq#x$vq~P!-jO^$hy!V$hy'j$hy'h$hy!S$hy!g$hyo$hy!X$hy%`$hy!a$hy~P!3jO!V4]O!X(oa~O^#Zy!V#Zy'j#Zy'h#Zy!S#Zy!g#Zyo#Zy!X#Zy%`#Zy!a#Zy~P!3jOX7`O~O!V0_O!W(ui~O]7fO~O!a5zO~O(T(qO!V'aX!W'aX~O!V4tO!W(ra~O!h%ZO'|%OO^(YX!a(YX!l(YX#W(YX'j(YX(`(YX~O's7oO~P.[O!x;oO!|7rO!}7qO#O7qO
#P7pO#S'bO#T'bO~PBtO^%^O!a#rO!l'hO#W'fO'j%^O(`'dO~O!W7vO~P%#XOl!kO'uTO'xUO(T!jO(_!nO~O|7wO~P%MdO![7{O!]7zO!^7zO#P7pO#S'bO#T'bO't!iO~PBtO![7{O!]7zO!^7zO!}7|O#O7|O#P7pO#S'bO#T'bO't!iO~PBtO!]7zO!^7zO't!iO(T!jO(_!nO~O!X0zO~O!X0zO%`8OO~Og8RO!X0zO%`8OO~OX8WO!V'da!W'da~O!V1VO!W(si~O!g8[O~O!g8]O~O!g8^O~O!g8^O~P%QO^8`O~O!a8cO~O!g8dO~O!V(ei!W(ei~P#?dO^%^O#W8lO'j%^O~O^%^O!a#rO#W8lO'j%^O~O^%^O!a#rO!l8pO#W8lO'j%^O(`'dO~O!h%ZO'|%OO~P&$QO!]8qO!^8qO't!iO~PBtO!V(]O!g(ay~O!V(by!g(by^(by'j(by~P!3jO!X'QO%`8uO~O#c$tqP$tqX$tq^$tqk$tqz$tq!V$tq!e$tq!f$tq!h$tq!l$tq#f$tq#g$tq#h$tq#i$tq#j$tq#k$tq#l$tq#m$tq#n$tq#p$tq#r$tq#t$tq#u$tq'j$tq(Q$tq(`$tq!g$tq!S$tq'h$tq#W$tqo$tq!X$tq%`$tq!a$tq~P#(yO#c$vqP$vqX$vq^$vqk$vqz$vq!V$vq!e$vq!f$vq!h$vq!l$vq#f$vq#g$vq#h$vq#i$vq#j$vq#k$vq#l$vq#m$vq#n$vq#p$vq#r$vq#t$vq#u$vq'j$vq(Q$vq(`$vq!g$vq!S$vq'h$vq#W$vqo$vq!X$vq%`$vq!a$vq~P#(yO!V'Pi!g'Pi~P!3jO#x#Zq!V#Zq!W#Zq~P#?dOy/yOz/yO|/zOPvaXvagvakva!eva!fva!hva!lva#fva#gva#hva#iva#jva#kva#lva#mva#nva#pva#rva#tva#uva#xva(Qva(`va(gva(hva!Vva!Wva~Oy)sO|)tOP$kaX$kag$kak$kaz$ka!e$ka!f$ka!h$ka!l$ka#f$ka#g$ka#h$ka#i$ka#j$ka#k$ka#l$ka#m$ka#n$ka#p$ka#r$ka#t$ka#u$ka#x$ka(Q$ka(`$ka(g$ka(h$ka!V$ka!W$ka~Oy)sO|)tOP$maX$mag$mak$maz$ma!e$ma!f$ma!h$ma!l$ma#f$ma#g$ma#h$ma#i$ma#j$ma#k$ma#l$ma#m$ma#n$ma#p$ma#r$ma#t$ma#u$ma#x$ma(Q$ma(`$ma(g$ma(h$ma!V$ma!W$ma~OP${aX${ak${az${a!e${a!f${a!h${a!l${a#f${a#g${a#h${a#i${a#j${a#k${a#l${a#m${a#n${a#p${a#r${a#t${a#u${a#x${a(Q${a(`${a!V${a!W${a~P%AYO#x$gq!V$gq!W$gq~P#?dO#x$hq!V$hq!W$hq~P#?dO!W9PO~O#x9QO~P!-jO!a#rO!V'Yi!g'Yi~O!a#rO(`'dO!V'Yi!g'Yi~O!V/VO!g(mq~O!S'[i!V'[i~P#(yO!V/_O!S(nq~O!S9WO~P#(yO!S9WO~Od(Oy!V(Oy~P!-jO!V'_a!X'_a~P#(yO!X%Sq^%Sq!V%Sq'j%Sq~P#(yOX9]O~O!V0_O!W(uq~O#W9aO!V'aa!W'aa~O!V4tO!W(ri~P#?dOPYXXYXkYXyYXzYX|YX!SYX!VYX!eYX!fYX!hYX!lYX#WYX#ccX#fYX#gYX#hYX#iYX#jYX#kYX#lYX#mYX#nYX#pYX#rYX#tYX#uYX#zYX(QYX(`YX(gYX(hYX~O!a%QX#n%QX~P&6lO#S-cO#T-cO~PBtO#P9eO#S-cO#T-cO~PBtO!}9fO#O9fO#P9eO#S-cO#T-cO~PBtO!]9iO!^9iO't!iO(T!jO(_!nO~O![9lO!]9iO!^9iO#P9eO#S-cO#T-cO't!iO~PBtO!X0zO%`9oO~O'uTO'xUO(T9tO~O!V1VO!W(sq~O!g9wO~O!g9wO~P%QO!g9yO~O!g9zO~O#W9|O!V#`y!W#`y~O!V#`y!W#`y~P#?dO^%^O#W:QO'j%^O~O^%^O!a#rO#W:QO'j%^O~O^%^O!a#rO!l:UO#W:QO'j%^O(`'dO~O!X'QO%`:XO~O#x#vy!V#vy!W#vy~P#?dOP$tiX$tik$tiz$ti!e$ti!f$ti!h$ti!l$ti#f$ti#g$ti#h$ti#i$ti#j$ti#k$ti#l$ti#m$ti#n$ti#p$ti#r$ti#t$ti#u$ti#x$ti(Q$ti(`$ti!V$ti!W$ti~P%AYOy)sO|)tO(h)xOP%WiX%Wig%Wik%Wiz%Wi!e%Wi!f%Wi!h%Wi!l%Wi#f%Wi#g%Wi#h%Wi#i%Wi#j%Wi#k%Wi#l%Wi#m%Wi#n%Wi#p%Wi#r%Wi#t%Wi#u%Wi#x%Wi(Q%Wi(`%Wi(g%Wi!V%Wi!W%Wi~Oy)sO|)tOP%YiX%Yig%Yik%Yiz%Yi!e%Yi!f%Yi!h%Yi!l%Yi#f%Yi#g%Yi#h%Yi#i%Yi#j%Yi#k%Yi#l%Yi#m%Yi#n%Yi#p%Yi#r%Yi#t%Yi#u%Yi#x%Yi(Q%Yi(`%Yi(g%Yi(h%Yi!V%Yi!W%Yi~O#x$hy!V$hy!W$hy~P#?dO#x#Zy!V#Zy!W#Zy~P#?dO!a#rO!V'Yq!g'Yq~O!V/VO!g(my~O!S'[q!V'[q~P#(yO!S:`O~P#(yO!V0_O!W(uy~O!V4tO!W(rq~O#S2fO#T2fO~PBtO#P:gO#S2fO#T2fO~PBtO!]:kO!^:kO't!iO(T!jO(_!nO~O!X0zO%`:nO~O!g:qO~O^%^O#W:vO'j%^O~O^%^O!a#rO#W:vO'j%^O~O!X'QO%`:{O~OP$tqX$tqk$tqz$tq!e$tq!f$tq!h$tq!l$tq#f$tq#g$tq#h$tq#i$tq#j$tq#k$tq#l$tq#m$tq#n$tq#p$tq#r$tq#t$tq#u$tq#x$tq(Q$tq(`$tq!V$tq!W$tq~P%AYOP$vqX$vqk$vqz$vq!e$vq!f$vq!h$vq!l$vq#f$vq#g$vq#h$vq#i$vq#j$vq#k$vq#l$vq#m$vq#n$vq#p$vq#r$vq#t$vq#u$vq#x$vq(Q$vq(`$vq!V$vq!W$vq~P%AYOd%[!Z!V%[!Z#W%[!Z#x%[!Z~P!-jO!V'aq!W'aq~P#?dO#S6`O#T6`O~PBtO!V#`!Z!W#`!Z~P#?dO^%^O#W;ZO'j%^O~O#c%[!ZP%[!ZX%[!Z^%[!Zk%[!Zz%[!Z!V%[!Z!e%[!Z!f%[!Z!h%[!Z!l%[!Z#f%[!Z#g%[!Z#h%[!Z#i%[!Z#j%[!Z#k%[!Z#l%[!Z#m%[!Z#n%[!Z#p%[!Z#r%[!Z#t%[!Z#u%[!Z'j%[!Z(Q%[!Z(`%[!Z!g%[!Z!S%[!Z'h%[!Z#W%[!Zo%[!Z!X%[!Z%`%[!Z!a%[!Z~P#(yOP%[!ZX%[!Zk%[!Zz%[!Z!e%[!Z!f%[!Z!h%[!Z!l%[!Z#f%[!Z#g%[!Z#h%[!Z#i%[!Z#j%[!Z#k%[!Z#l%[!Z#m%[!Z#n%[!Z#p%[!Z#r%[!Z#t%[!Z#u%[!Z#x%[
!Z(Q%[!Z(`%[!Z!V%[!Z!W%[!Z~P%AYOo(UX~P1dO't!iO~P!'RO!ScX!VcX#WcX~P&6lOPYXXYXkYXyYXzYX|YX!VYX!VcX!eYX!fYX!hYX!lYX#WYX#WcX#ccX#fYX#gYX#hYX#iYX#jYX#kYX#lYX#mYX#nYX#pYX#rYX#tYX#uYX#zYX(QYX(`YX(gYX(hYX~O!acX!gYX!gcX(`cX~P'!sOP;nOQ;nOa=_Ob!fOikOk;nOlkOmkOskOu;nOw;nO|WO!QkO!RkO!XXO!c;qO!hZO!k;nO!l;nO!m;nO!o;rO!q;sO!t!eO$P!hO$TfO's)RO'uTO'xUO(QVO(_[O(l=]O~O!Vv!>v!BnPPP!BuHdPPPPPPPPPPP!FTP!GiPPHd!HyPHdPHdHdHdHdPHd!J`PP!MiP#!nP#!r#!|##Q##QP!MfP##U##UP#&ZP#&_HdHd#&e#)iAQPAQPAQAQP#*sAQAQ#,mAQ#.zAQ#0nAQAQ#1[#3W#3W#3[#3d#3W#3lP#3WPAQ#4hAQ#5pAQAQ6iPPP#6{PP#7e#7eP#7eP#7z#7ePP#8QP#7wP#7w#8d!1p#7w#9O#9U6f(}#9X(}P#9`#9`#9`P(}P(}P(}P(}PP(}P#9f#9iP#9i(}P#9mP#9pP(}P(}P(}P(}P(}P(}(}PP#9v#9|#:W#:^#:d#:j#:p#;O#;U#;[#;f#;l#b#?r#@Q#@W#@^#@d#@j#@t#@z#AQ#A[#An#AtPPPPPPPPPP#AzPPPPPPP#Bn#FYP#Gu#G|#HUPPPP#L`$ U$'t$'w$'z$)w$)z$)}$*UPP$*[$*`$+X$,X$,]$,qPP$,u$,{$-PP$-S$-W$-Z$.P$.g$.l$.o$.r$.x$.{$/P$/TR!yRmpOXr!X#a%]&d&f&g&i,^,c1g1jU!pQ'Q-OQ%ctQ%kwQ%rzQ&[!TS&x!c,vQ'W!f[']!m!r!s!t!u!vS*[$y*aQ+U%lQ+c%tQ+}&UQ,|'PQ-W'XW-`'^'_'`'aQ/p*cQ1U,OU2b-b-d-eS4}0z5QS6[2e2gU7z5U5V5WQ8q6_S9i7{7|Q:k9lR TypeParamList TypeDefinition extends ThisType this LiteralType ArithOp Number BooleanLiteral TemplateType InterpolationEnd Interpolation InterpolationStart NullType null VoidType void TypeofType typeof MemberExpression . ?. PropertyName [ TemplateString Escape Interpolation super RegExp ] ArrayExpression Spread , } { ObjectExpression Property async get set PropertyDefinition Block : NewExpression new TypeArgList CompareOp < ) ( ArgList UnaryExpression delete LogicOp BitOp YieldExpression yield AwaitExpression await ParenthesizedExpression ClassExpression class ClassBody MethodDeclaration Decorator @ MemberExpression PrivatePropertyName CallExpression Privacy static abstract override PrivatePropertyDefinition PropertyDeclaration readonly accessor Optional TypeAnnotation Equals StaticBlock FunctionExpression ArrowFunction ParamList ParamList ArrayPattern ObjectPattern PatternProperty Privacy readonly Arrow MemberExpression BinaryExpression ArithOp ArithOp ArithOp ArithOp BitOp CompareOp instanceof satisfies in const CompareOp BitOp BitOp BitOp LogicOp LogicOp ConditionalExpression LogicOp LogicOp AssignmentExpression UpdateOp PostfixExpression CallExpression TaggedTemplateExpression DynamicImport import ImportMeta JSXElement JSXSelfCloseEndTag JSXStartTag JSXSelfClosingTag JSXIdentifier JSXBuiltin JSXIdentifier JSXNamespacedName JSXMemberExpression JSXSpreadAttribute JSXAttribute JSXAttributeValue JSXEscape JSXEndTag JSXOpenTag JSXFragmentTag JSXText JSXEscape JSXStartCloseTag JSXCloseTag PrefixCast ArrowFunction TypeParamList SequenceExpression KeyofType keyof UniqueType unique ImportType InferredType infer TypeName ParenthesizedType FunctionSignature ParamList NewSignature IndexedType TupleType Label ArrayType ReadonlyType ObjectType MethodType PropertyType IndexSignature PropertyDefinition CallSignature TypePredicate is NewSignature new UnionType LogicOp IntersectionType LogicOp ConditionalType ParameterizedType ClassDeclaration abstract implements type VariableDeclaration let var TypeAliasDeclaration InterfaceDeclaration interface EnumDeclaration enum EnumBody NamespaceDeclaration namespace module AmbientDeclaration declare GlobalDeclaration global ClassDeclaration ClassBody MethodDeclaration AmbientFunctionDeclaration ExportGroup VariableName VariableName ImportDeclaration ImportGroup ForStatement for ForSpec ForInSpec ForOfSpec of WhileStatement while WithStatement with DoStatement do IfStatement if else SwitchStatement switch SwitchBody 
CaseLabel case DefaultLabel TryStatement try CatchClause catch FinallyClause finally ReturnStatement return ThrowStatement throw BreakStatement break ContinueStatement continue DebuggerStatement debugger LabeledStatement ExpressionStatement SingleExpression SingleClassItem",maxTerm:362,context:oO,nodeProps:[["group",-26,6,14,16,62,198,202,205,206,208,211,214,225,227,233,235,237,239,242,248,254,256,258,260,262,264,265,"Statement",-32,10,11,25,28,29,35,45,48,49,51,56,64,72,76,78,80,81,102,103,112,113,130,133,135,136,137,138,140,141,161,162,164,"Expression",-23,24,26,30,34,36,38,165,167,169,170,172,173,174,176,177,178,180,181,182,192,194,196,197,"Type",-3,84,95,101,"ClassItem"],["openedBy",31,"InterpolationStart",50,"[",54,"{",69,"(",142,"JSXStartTag",154,"JSXStartTag JSXStartCloseTag"],["closedBy",33,"InterpolationEnd",44,"]",55,"}",70,")",143,"JSXSelfCloseEndTag JSXEndTag",159,"JSXEndTag"]],propSources:[cO],skippedNodes:[0,3,4,268],repeatNodeCount:32,tokenData:"$>y(CSR!bOX%ZXY+gYZ-yZ[+g[]%Z]^.c^p%Zpq+gqr/mrs3cst:_tu>PuvBavwDxwxGgxyMvyz! Qz{!![{|!%O|}!&]}!O!%O!O!P!'g!P!Q!1w!Q!R#0t!R![#3T![!]#@T!]!^#Aa!^!_#Bk!_!`#GS!`!a#In!a!b#N{!b!c$$z!c!}>P!}#O$&U#O#P$'`#P#Q$,w#Q#R$.R#R#S>P#S#T$/`#T#o$0j#o#p$4z#p#q$5p#q#r$7Q#r#s$8^#s$f%Z$f$g+g$g#BY>P#BY#BZ$9h#BZ$IS>P$IS$I_$9h$I_$I|>P$I|$I}$P$JT$JU$9h$JU$KV>P$KV$KW$9h$KW&FU>P&FU&FV$9h&FV;'S>P;'S;=`BZ<%l?HT>P?HT?HU$9h?HUO>P(n%d_$c&j'vp'y!bOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z&j&hT$c&jO!^&c!_#o&c#p;'S&c;'S;=`&w<%lO&c&j&zP;=`<%l&c'|'U]$c&j'y!bOY&}YZ&cZw&}wx&cx!^&}!^!_'}!_#O&}#O#P&c#P#o&}#o#p'}#p;'S&};'S;=`(l<%lO&}!b(SU'y!bOY'}Zw'}x#O'}#P;'S'};'S;=`(f<%lO'}!b(iP;=`<%l'}'|(oP;=`<%l&}'[(y]$c&j'vpOY(rYZ&cZr(rrs&cs!^(r!^!_)r!_#O(r#O#P&c#P#o(r#o#p)r#p;'S(r;'S;=`*a<%lO(rp)wU'vpOY)rZr)rs#O)r#P;'S)r;'S;=`*Z<%lO)rp*^P;=`<%l)r'[*dP;=`<%l(r#S*nX'vp'y!bOY*gZr*grs'}sw*gwx)rx#O*g#P;'S*g;'S;=`+Z<%lO*g#S+^P;=`<%l*g(n+dP;=`<%l%Z(CS+rq$c&j'vp'y!b'l(;dOX%ZXY+gYZ&cZ[+g[p%Zpq+gqr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_#O%Z#O#P&c#P#o%Z#o#p*g#p$f%Z$f$g+g$g#BY%Z#BY#BZ+g#BZ$IS%Z$IS$I_+g$I_$JT%Z$JT$JU+g$JU$KV%Z$KV$KW+g$KW&FU%Z&FU&FV+g&FV;'S%Z;'S;=`+a<%l?HT%Z?HT?HU+g?HUO%Z(CS.ST'w#S$c&j'm(;dO!^&c!_#o&c#p;'S&c;'S;=`&w<%lO&c(CS.n_$c&j'vp'y!b'm(;dOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z%#`/x`$c&j!l$Ip'vp'y!bOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_!`0z!`#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z%#S1V`#p$Id$c&j'vp'y!bOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_!`2X!`#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z%#S2d_#p$Id$c&j'vp'y!bOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z$2b3l_'u$(n$c&j'y!bOY4kYZ5qZr4krs7nsw4kwx5qx!^4k!^!_8p!_#O4k#O#P5q#P#o4k#o#p8p#p;'S4k;'S;=`:X<%lO4k*r4r_$c&j'y!bOY4kYZ5qZr4krs7nsw4kwx5qx!^4k!^!_8p!_#O4k#O#P5q#P#o4k#o#p8p#p;'S4k;'S;=`:X<%lO4k)`5vX$c&jOr5qrs6cs!^5q!^!_6y!_#o5q#o#p6y#p;'S5q;'S;=`7h<%lO5q)`6jT$^#t$c&jO!^&c!_#o&c#p;'S&c;'S;=`&w<%lO&c#t6|TOr6yrs7]s;'S6y;'S;=`7b<%lO6y#t7bO$^#t#t7eP;=`<%l6y)`7kP;=`<%l5q*r7w]$^#t$c&j'y!bOY&}YZ&cZw&}wx&cx!^&}!^!_'}!_#O&}#O#P&c#P#o&}#o#p'}#p;'S&};'S;=`(l<%lO&}%W8uZ'y!bOY8pYZ6yZr8prs9hsw8pwx6yx#O8p#O#P6y#P;'S8p;'S;=`:R<%lO8p%W9oU$^#t'y!bOY'}Zw'}x#O'}#P;'S'};'S;=`(f<%lO'}%W:UP;=`<%l8p*r:[P;=`<%l4k#%|:hg$c&j'vp'y!bOY%ZYZ&cZr%Zrs&}st%Ztu`k$c&j'vp'y!b(T!LY's&;d$V#tOY%ZYZ&cZr%Zrs&}st%Ztu>Puw%Zwx(rx}%Z}!O@T!O!Q%Z!Q![>P![!^%Z!^!_*g!_!c%Z!c!}>P!}#O%Z#O#P&c#P#R%Z#R#S>P#S#T%Z#T#o>P#o#p*g#p$g%Z$g;'S>P;'S;=`BZ<%lO>P+d@`k$c&j'vp'y!b$V#tOY%ZYZ&cZr%Zrs&}st%Ztu@Tuw%Zwx(rx}%Z}!O@T!O!Q%Z!Q![@T![!^%Z!^!_*g!_!c%Z!c!}@T!}#O%Z#O#P&c#P#R%Z#R#S@T#S#T%Z#T#o@T#o#p*g
#p$g%Z$g;'S@T;'S;=`BT<%lO@T+dBWP;=`<%l@T(CSB^P;=`<%l>P%#SBl`$c&j'vp'y!b#h$IdOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_!`Cn!`#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z%#SCy_$c&j#z$Id'vp'y!bOY%ZYZ&cZr%Zrs&}sw%Zwx(rx!^%Z!^!_*g!_#O%Z#O#P&c#P#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z%DfETa(h%Z![!^%Z!^!_*g!_!c%Z!c!i#>Z!i#O%Z#O#P&c#P#R%Z#R#S#>Z#S#T%Z#T#Z#>Z#Z#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z$/l#>fi$c&j'vp'y!bl$'|OY%ZYZ&cZr%Zrs&}sw%Zwx(rx!Q%Z!Q![#>Z![!^%Z!^!_*g!_!c%Z!c!i#>Z!i#O%Z#O#P&c#P#R%Z#R#S#>Z#S#T%Z#T#Z#>Z#Z#b%Z#b#c#5T#c#o%Z#o#p*g#p;'S%Z;'S;=`+a<%lO%Z%Gh#@b_!a$b$c&j#x%Puw%Zwx(rx}%Z}!O@T!O!Q%Z!Q![>P![!^%Z!^!_*g!_!c%Z!c!}>P!}#O%Z#O#P&c#P#R%Z#R#S>P#S#T%Z#T#o>P#o#p*g#p$f%Z$f$g+g$g#BY>P#BY#BZ$9h#BZ$IS>P$IS$I_$9h$I_$JT>P$JT$JU$9h$JU$KV>P$KV$KW$9h$KW&FU>P&FU&FV$9h&FV;'S>P;'S;=`BZ<%l?HT>P?HT?HU$9h?HUO>P(CS$=Uk$c&j'vp'y!b'm(;d(T!LY's&;d$V#tOY%ZYZ&cZr%Zrs&}st%Ztu>Puw%Zwx(rx}%Z}!O@T!O!Q%Z!Q![>P![!^%Z!^!_*g!_!c%Z!c!}>P!}#O%Z#O#P&c#P#R%Z#R#S>P#S#T%Z#T#o>P#o#p*g#p$g%Z$g;'S>P;'S;=`BZ<%lO>P",tokenizers:[lO,XO,2,3,4,5,6,7,8,9,10,11,12,13,ZO,new u("$S~RRtu[#O#Pg#S#T#|~_P#o#pb~gOq~~jVO#i!P#i#j!U#j#l!P#l#m!q#m;'S!P;'S;=`#v<%lO!P~!UO!O~~!XS!Q![!e!c!i!e#T#Z!e#o#p#Z~!hR!Q![!q!c!i!q#T#Z!q~!tR!Q![!}!c!i!}#T#Z!}~#QR!Q![!P!c!i!P#T#Z!P~#^R!Q![#g!c!i#g#T#Z#g~#jS!Q![#g!c!i#g#T#Z#g#q#r!P~#yP;=`<%l!P~$RO(S~~",141,325),new u("j~RQYZXz{^~^O'p~~aP!P!Qd~iO'q~~",25,307)],topRules:{Script:[0,5],SingleExpression:[1,266],SingleClassItem:[2,267]},dialects:{jsx:13213,ts:13215},dynamicPrecedences:{76:1,78:1,162:1,190:1},specialized:[{term:311,get:O=>sO[O]||-1},{term:327,get:O=>pO[O]||-1},{term:67,get:O=>gO[O]||-1}],tokenPrec:13238}),bO=[n("function ${name}(${params}) {\n ${}\n}",{label:"function",detail:"definition",type:"keyword"}),n("for (let ${index} = 0; ${index} < ${bound}; ${index}++) {\n ${}\n}",{label:"for",detail:"loop",type:"keyword"}),n("for (let ${name} of ${collection}) {\n ${}\n}",{label:"for",detail:"of loop",type:"keyword"}),n("do {\n ${}\n} while (${})",{label:"do",detail:"loop",type:"keyword"}),n("while (${}) {\n ${}\n}",{label:"while",detail:"loop",type:"keyword"}),n(`try { - \${} -} catch (\${error}) { - \${} -}`,{label:"try",detail:"/ catch block",type:"keyword"}),n("if (${}) {\n ${}\n}",{label:"if",detail:"block",type:"keyword"}),n(`if (\${}) { - \${} -} else { - \${} -}`,{label:"if",detail:"/ else block",type:"keyword"}),n(`class \${name} { - constructor(\${params}) { - \${} - } -}`,{label:"class",detail:"definition",type:"keyword"}),n('import {${names}} from "${module}"\n${}',{label:"import",detail:"named",type:"keyword"}),n('import ${name} from "${module}"\n${}',{label:"import",detail:"default",type:"keyword"})],v=new OO,G=new Set(["Script","Block","FunctionExpression","FunctionDeclaration","ArrowFunction","MethodDeclaration","ForStatement"]);function c(O){return(Q,i)=>{let a=Q.node.getChild("VariableDefinition");return a&&i(a,O),!0}}const hO=["FunctionDeclaration"],mO={FunctionDeclaration:c("function"),ClassDeclaration:c("class"),ClassExpression:()=>!0,EnumDeclaration:c("constant"),TypeAliasDeclaration:c("type"),NamespaceDeclaration:c("namespace"),VariableDefinition(O,Q){O.matchContext(hO)||Q(O,"variable")},TypeDefinition(O,Q){Q(O,"type")},__proto__:null};function q(O,Q){let i=v.get(Q);if(i)return i;let a=[],$=!0;function t(r,S){let o=O.sliceString(r.from,r.to);a.push({label:o,type:S})}return Q.cursor(M.IncludeAnonymous).iterate(r=>{if($)$=!1;else if(r.name){let S=mO[r.name];if(S&&S(r,t)||G.has(r.name))return!1}else if(r.to-r.from>8192){for(let S of q(O,r.node))a.push(S);return!1}}),v.set(Q,a),a}const 
g=/^[\w$\xa1-\uffff][\w$\d\xa1-\uffff]*$/,U=["TemplateString","String","RegExp","LineComment","BlockComment","VariableDefinition","TypeDefinition","Label","PropertyDefinition","PropertyName","PrivatePropertyDefinition","PrivatePropertyName"];function WO(O){let Q=W(O.state).resolveInner(O.pos,-1);if(U.indexOf(Q.name)>-1)return null;let i=Q.name=="VariableName"||Q.to-Q.from<20&&g.test(O.state.sliceDoc(Q.from,Q.to));if(!i&&!O.explicit)return null;let a=[];for(let $=Q;$;$=$.parent)G.has($.name)&&(a=a.concat(q(O.state.doc,$)));return{options:a,from:i?Q.from:O.pos,validFor:g}}function h(O,Q,i){var a;let $=[];for(;;){let t=Q.firstChild,r;if(t?.name=="VariableName")return $.push(O(t)),{path:$.reverse(),name:i};if(t?.name=="MemberExpression"&&((a=r=t.lastChild)===null||a===void 0?void 0:a.name)=="PropertyName")$.push(O(r)),Q=t;else return null}}function UO(O){let Q=a=>O.state.doc.sliceString(a.from,a.to),i=W(O.state).resolveInner(O.pos,-1);return i.name=="PropertyName"?h(Q,i.parent,Q(i)):U.indexOf(i.name)>-1?null:i.name=="VariableName"||i.to-i.from<20&&g.test(Q(i))?{path:[],name:Q(i)}:(i.name=="."||i.name=="?.")&&i.parent.name=="MemberExpression"?h(Q,i.parent,""):i.name=="MemberExpression"?h(Q,i,""):O.explicit?{path:[],name:""}:null}function fO(O,Q){let i=[],a=new Set;for(let $=0;;$++){for(let r of(Object.getOwnPropertyNames||Object.keys)(O)){if(a.has(r))continue;a.add(r);let S;try{S=O[r]}catch{continue}i.push({label:r,type:typeof S=="function"?/^[A-Z]/.test(r)?"class":Q?"function":"method":Q?"variable":"property",boost:-$})}let t=Object.getPrototypeOf(O);if(!t)return i;O=t}}function JO(O){let Q=new Map;return i=>{let a=UO(i);if(!a)return null;let $=O;for(let r of a.path)if($=$[r],!$)return null;let t=Q.get($);return t||Q.set($,t=fO($,!a.path.length)),{from:i.pos-a.name.length,options:t,validFor:g}}}const X=I.define({name:"javascript",parser:YO.configure({props:[E.add({IfStatement:Y({except:/^\s*({|else\b)/}),TryStatement:Y({except:/^\s*({|catch\b|finally\b)/}),LabeledStatement:A,SwitchBody:O=>{let Q=O.textAfter,i=/^\s*\}/.test(Q),a=/^\s*(case|default)\b/.test(Q);return O.baseIndent+(i?0:a?1:2)*O.unit},Block:J({closing:"}"}),ArrowFunction:O=>O.baseIndent+O.unit,"TemplateString BlockComment":()=>null,"Statement Property":Y({except:/^{/}),JSXElement(O){let Q=/^\s*<\//.test(O.textAfter);return O.lineIndent(O.node.from)+(Q?0:O.unit)},JSXEscape(O){let Q=/\s*\}/.test(O.textAfter);return O.lineIndent(O.node.from)+(Q?0:O.unit)},"JSXOpenTag JSXSelfClosingTag"(O){return O.column(O.node.from)+O.unit}}),L.add({"Block ClassBody SwitchBody EnumBody ObjectExpression ArrayExpression":N,BlockComment(O){return{from:O.from+2,to:O.to-2}}})]}),languageData:{closeBrackets:{brackets:["(","[","{","'",'"',"`"]},commentTokens:{line:"//",block:{open:"/*",close:"*/"}},indentOnInput:/^\s*(?:case |default:|\{|\}|<\/)$/,wordChars:"$"}}),T={test:O=>/^JSX/.test(O.name),facet:F({commentTokens:{block:{open:"{/*",close:"*/}"}}})},uO=X.configure({dialect:"ts"},"typescript"),yO=X.configure({dialect:"jsx",props:[k.add(O=>O.isTop?[T]:void 0)]}),jO=X.configure({dialect:"jsx ts",props:[k.add(O=>O.isTop?[T]:void 0)]},"typescript"),dO="break case const continue default delete export extends false finally in instanceof let new return static super switch this throw true typeof var yield".split(" ").map(O=>({label:O,type:"keyword"}));function LO(O={}){let Q=O.jsx?O.typescript?jO:yO:O.typescript?uO:X;return new D(Q,[X.data.of({autocomplete:B(U,H(bO.concat(dO)))}),X.data.of({autocomplete:WO}),O.jsx?wO:[]])}function 
xO(O){for(;;){if(O.name=="JSXOpenTag"||O.name=="JSXSelfClosingTag"||O.name=="JSXFragmentTag")return O;if(!O.parent)return null;O=O.parent}}function w(O,Q,i=O.length){for(let a=Q?.firstChild;a;a=a.nextSibling)if(a.name=="JSXIdentifier"||a.name=="JSXBuiltin"||a.name=="JSXNamespacedName"||a.name=="JSXMemberExpression")return O.sliceString(a.from,Math.min(a.to,i));return""}const vO=typeof navigator=="object"&&/Android\b/.test(navigator.userAgent),wO=K.inputHandler.of((O,Q,i,a)=>{if((vO?O.composing:O.compositionStarted)||O.state.readOnly||Q!=i||a!=">"&&a!="/"||!X.isActiveAt(O.state,Q,-1))return!1;let{state:$}=O,t=$.changeByRange(r=>{var S,o;let{head:P}=r,Z=W($).resolveInner(P,-1),s;if(Z.name=="JSXStartTag"&&(Z=Z.parent),a==">"&&Z.name=="JSXFragmentTag")return{range:b.cursor(P+1),changes:{from:P,insert:">"}};if(a=="/"&&Z.name=="JSXFragmentTag"){let l=Z.parent,p=l?.parent;if(l.from==P-1&&((S=p.lastChild)===null||S===void 0?void 0:S.name)!="JSXEndTag"&&(s=w($.doc,p?.firstChild,P))){let f=`/${s}>`;return{range:b.cursor(P+f.length),changes:{from:P,insert:f}}}}else if(a==">"){let l=xO(Z);if(l&&((o=l.lastChild)===null||o===void 0?void 0:o.name)!="JSXEndTag"&&$.sliceDoc(P,P+2)!="`}}}return{range:r}});return t.changes.empty?!1:(O.dispatch(t,{userEvent:"input.type",scrollIntoView:!0}),!0)});function NO(O,Q){return Q||(Q={parserOptions:{ecmaVersion:2019,sourceType:"module"},env:{browser:!0,node:!0,es6:!0,es2015:!0,es2017:!0,es2020:!0},rules:{}},O.getRules().forEach((i,a)=>{i.meta.docs.recommended&&(Q.rules[a]=2)})),i=>{let{state:a}=i,$=[];for(let{from:t,to:r}of X.findRegions(a)){let S=a.doc.lineAt(t),o={line:S.number-1,col:t-S.from,pos:t};for(let P of O.verify(a.sliceDoc(t,r),Q))$.push(VO(P,a.doc,o))}return $}}function V(O,Q,i,a){return i.line(O+a.line).from+Q+(O==1?a.col-1:-1)}function VO(O,Q,i){let a=V(O.line,O.column,Q,i),$={from:a,to:O.endLine!=null&&O.endColumn!=1?V(O.endLine,O.endColumn,Q,i):a,message:O.message,source:O.ruleId?"eslint:"+O.ruleId:"eslint",severity:O.severity==1?"warning":"error"};if(O.fix){let{range:t,text:r}=O.fix,S=t[0]+i.pos-a,o=t[1]+i.pos-a;$.actions=[{name:"fix",apply(P,Z){P.dispatch({changes:{from:Z+S,to:Z+o,insert:r},scrollIntoView:!0})}}]}return $}export{wO as autoCloseTags,UO as completionPath,NO as esLint,LO as javascript,X as javascriptLanguage,yO as jsxLanguage,WO as localCompletionSource,JO as scopeCompletionSource,bO as snippets,jO as tsxLanguage,uO as typescriptLanguage}; -//# sourceMappingURL=index-ef54ac87.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jinja2/parser.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jinja2/parser.py deleted file mode 100644 index cefce2dfa1d2a4171838b0d0135af8ea3ff7d62c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jinja2/parser.py +++ /dev/null @@ -1,1032 +0,0 @@ -"""Parse tokens from the lexer into nodes for the compiler.""" -import typing -import typing as t - -from . 
import nodes -from .exceptions import TemplateAssertionError -from .exceptions import TemplateSyntaxError -from .lexer import describe_token -from .lexer import describe_token_expr - -if t.TYPE_CHECKING: - import typing_extensions as te - from .environment import Environment - -_ImportInclude = t.TypeVar("_ImportInclude", nodes.Import, nodes.Include) -_MacroCall = t.TypeVar("_MacroCall", nodes.Macro, nodes.CallBlock) - -_statement_keywords = frozenset( - [ - "for", - "if", - "block", - "extends", - "print", - "macro", - "include", - "from", - "import", - "set", - "with", - "autoescape", - ] -) -_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"]) - -_math_nodes: t.Dict[str, t.Type[nodes.Expr]] = { - "add": nodes.Add, - "sub": nodes.Sub, - "mul": nodes.Mul, - "div": nodes.Div, - "floordiv": nodes.FloorDiv, - "mod": nodes.Mod, -} - - -class Parser: - """This is the central parsing class Jinja uses. It's passed to - extensions and can be used to parse expressions or statements. - """ - - def __init__( - self, - environment: "Environment", - source: str, - name: t.Optional[str] = None, - filename: t.Optional[str] = None, - state: t.Optional[str] = None, - ) -> None: - self.environment = environment - self.stream = environment._tokenize(source, name, filename, state) - self.name = name - self.filename = filename - self.closed = False - self.extensions: t.Dict[ - str, t.Callable[["Parser"], t.Union[nodes.Node, t.List[nodes.Node]]] - ] = {} - for extension in environment.iter_extensions(): - for tag in extension.tags: - self.extensions[tag] = extension.parse - self._last_identifier = 0 - self._tag_stack: t.List[str] = [] - self._end_token_stack: t.List[t.Tuple[str, ...]] = [] - - def fail( - self, - msg: str, - lineno: t.Optional[int] = None, - exc: t.Type[TemplateSyntaxError] = TemplateSyntaxError, - ) -> "te.NoReturn": - """Convenience method that raises `exc` with the message, passed - line number or last line number as well as the current name and - filename. - """ - if lineno is None: - lineno = self.stream.current.lineno - raise exc(msg, lineno, self.name, self.filename) - - def _fail_ut_eof( - self, - name: t.Optional[str], - end_token_stack: t.List[t.Tuple[str, ...]], - lineno: t.Optional[int], - ) -> "te.NoReturn": - expected: t.Set[str] = set() - for exprs in end_token_stack: - expected.update(map(describe_token_expr, exprs)) - if end_token_stack: - currently_looking: t.Optional[str] = " or ".join( - map(repr, map(describe_token_expr, end_token_stack[-1])) - ) - else: - currently_looking = None - - if name is None: - message = ["Unexpected end of template."] - else: - message = [f"Encountered unknown tag {name!r}."] - - if currently_looking: - if name is not None and name in expected: - message.append( - "You probably made a nesting mistake. Jinja is expecting this tag," - f" but currently looking for {currently_looking}." - ) - else: - message.append( - f"Jinja was looking for the following tags: {currently_looking}." - ) - - if self._tag_stack: - message.append( - "The innermost block that needs to be closed is" - f" {self._tag_stack[-1]!r}." - ) - - self.fail(" ".join(message), lineno) - - def fail_unknown_tag( - self, name: str, lineno: t.Optional[int] = None - ) -> "te.NoReturn": - """Called if the parser encounters an unknown tag. Tries to fail - with a human readable error message that could help to identify - the problem. 
- """ - self._fail_ut_eof(name, self._end_token_stack, lineno) - - def fail_eof( - self, - end_tokens: t.Optional[t.Tuple[str, ...]] = None, - lineno: t.Optional[int] = None, - ) -> "te.NoReturn": - """Like fail_unknown_tag but for end of template situations.""" - stack = list(self._end_token_stack) - if end_tokens is not None: - stack.append(end_tokens) - self._fail_ut_eof(None, stack, lineno) - - def is_tuple_end( - self, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None - ) -> bool: - """Are we at the end of a tuple?""" - if self.stream.current.type in ("variable_end", "block_end", "rparen"): - return True - elif extra_end_rules is not None: - return self.stream.current.test_any(extra_end_rules) # type: ignore - return False - - def free_identifier(self, lineno: t.Optional[int] = None) -> nodes.InternalName: - """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" - self._last_identifier += 1 - rv = object.__new__(nodes.InternalName) - nodes.Node.__init__(rv, f"fi{self._last_identifier}", lineno=lineno) - return rv - - def parse_statement(self) -> t.Union[nodes.Node, t.List[nodes.Node]]: - """Parse a single statement.""" - token = self.stream.current - if token.type != "name": - self.fail("tag name expected", token.lineno) - self._tag_stack.append(token.value) - pop_tag = True - try: - if token.value in _statement_keywords: - f = getattr(self, f"parse_{self.stream.current.value}") - return f() # type: ignore - if token.value == "call": - return self.parse_call_block() - if token.value == "filter": - return self.parse_filter_block() - ext = self.extensions.get(token.value) - if ext is not None: - return ext(self) - - # did not work out, remove the token we pushed by accident - # from the stack so that the unknown tag fail function can - # produce a proper error message. - self._tag_stack.pop() - pop_tag = False - self.fail_unknown_tag(token.value, token.lineno) - finally: - if pop_tag: - self._tag_stack.pop() - - def parse_statements( - self, end_tokens: t.Tuple[str, ...], drop_needle: bool = False - ) -> t.List[nodes.Node]: - """Parse multiple statements into a list until one of the end tokens - is reached. This is used to parse the body of statements as it also - parses template data if appropriate. The parser checks first if the - current token is a colon and skips it if there is one. Then it checks - for the block end and parses until if one of the `end_tokens` is - reached. Per default the active token in the stream at the end of - the call is the matched end token. If this is not wanted `drop_needle` - can be set to `True` and the end token is removed. - """ - # the first token may be a colon for python compatibility - self.stream.skip_if("colon") - - # in the future it would be possible to add whole code sections - # by adding some sort of end of statement token and parsing those here. 
- self.stream.expect("block_end") - result = self.subparse(end_tokens) - - # we reached the end of the template too early, the subparser - # does not check for this, so we do that now - if self.stream.current.type == "eof": - self.fail_eof(end_tokens) - - if drop_needle: - next(self.stream) - return result - - def parse_set(self) -> t.Union[nodes.Assign, nodes.AssignBlock]: - """Parse an assign statement.""" - lineno = next(self.stream).lineno - target = self.parse_assign_target(with_namespace=True) - if self.stream.skip_if("assign"): - expr = self.parse_tuple() - return nodes.Assign(target, expr, lineno=lineno) - filter_node = self.parse_filter(None) - body = self.parse_statements(("name:endset",), drop_needle=True) - return nodes.AssignBlock(target, filter_node, body, lineno=lineno) - - def parse_for(self) -> nodes.For: - """Parse a for loop.""" - lineno = self.stream.expect("name:for").lineno - target = self.parse_assign_target(extra_end_rules=("name:in",)) - self.stream.expect("name:in") - iter = self.parse_tuple( - with_condexpr=False, extra_end_rules=("name:recursive",) - ) - test = None - if self.stream.skip_if("name:if"): - test = self.parse_expression() - recursive = self.stream.skip_if("name:recursive") - body = self.parse_statements(("name:endfor", "name:else")) - if next(self.stream).value == "endfor": - else_ = [] - else: - else_ = self.parse_statements(("name:endfor",), drop_needle=True) - return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) - - def parse_if(self) -> nodes.If: - """Parse an if construct.""" - node = result = nodes.If(lineno=self.stream.expect("name:if").lineno) - while True: - node.test = self.parse_tuple(with_condexpr=False) - node.body = self.parse_statements(("name:elif", "name:else", "name:endif")) - node.elif_ = [] - node.else_ = [] - token = next(self.stream) - if token.test("name:elif"): - node = nodes.If(lineno=self.stream.current.lineno) - result.elif_.append(node) - continue - elif token.test("name:else"): - result.else_ = self.parse_statements(("name:endif",), drop_needle=True) - break - return result - - def parse_with(self) -> nodes.With: - node = nodes.With(lineno=next(self.stream).lineno) - targets: t.List[nodes.Expr] = [] - values: t.List[nodes.Expr] = [] - while self.stream.current.type != "block_end": - if targets: - self.stream.expect("comma") - target = self.parse_assign_target() - target.set_ctx("param") - targets.append(target) - self.stream.expect("assign") - values.append(self.parse_expression()) - node.targets = targets - node.values = values - node.body = self.parse_statements(("name:endwith",), drop_needle=True) - return node - - def parse_autoescape(self) -> nodes.Scope: - node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) - node.options = [nodes.Keyword("autoescape", self.parse_expression())] - node.body = self.parse_statements(("name:endautoescape",), drop_needle=True) - return nodes.Scope([node]) - - def parse_block(self) -> nodes.Block: - node = nodes.Block(lineno=next(self.stream).lineno) - node.name = self.stream.expect("name").value - node.scoped = self.stream.skip_if("name:scoped") - node.required = self.stream.skip_if("name:required") - - # common problem people encounter when switching from django - # to jinja. we do not support hyphens in block names, so let's - # raise a nicer error message in that case. - if self.stream.current.type == "sub": - self.fail( - "Block names in Jinja have to be valid Python identifiers and may not" - " contain hyphens, use an underscore instead." 
- ) - - node.body = self.parse_statements(("name:endblock",), drop_needle=True) - - # enforce that required blocks only contain whitespace or comments - # by asserting that the body, if not empty, is just TemplateData nodes - # with whitespace data - if node.required and not all( - isinstance(child, nodes.TemplateData) and child.data.isspace() - for body in node.body - for child in body.nodes # type: ignore - ): - self.fail("Required blocks can only contain comments or whitespace") - - self.stream.skip_if("name:" + node.name) - return node - - def parse_extends(self) -> nodes.Extends: - node = nodes.Extends(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - return node - - def parse_import_context( - self, node: _ImportInclude, default: bool - ) -> _ImportInclude: - if self.stream.current.test_any( - "name:with", "name:without" - ) and self.stream.look().test("name:context"): - node.with_context = next(self.stream).value == "with" - self.stream.skip() - else: - node.with_context = default - return node - - def parse_include(self) -> nodes.Include: - node = nodes.Include(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - if self.stream.current.test("name:ignore") and self.stream.look().test( - "name:missing" - ): - node.ignore_missing = True - self.stream.skip(2) - else: - node.ignore_missing = False - return self.parse_import_context(node, True) - - def parse_import(self) -> nodes.Import: - node = nodes.Import(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - self.stream.expect("name:as") - node.target = self.parse_assign_target(name_only=True).name - return self.parse_import_context(node, False) - - def parse_from(self) -> nodes.FromImport: - node = nodes.FromImport(lineno=next(self.stream).lineno) - node.template = self.parse_expression() - self.stream.expect("name:import") - node.names = [] - - def parse_context() -> bool: - if self.stream.current.value in { - "with", - "without", - } and self.stream.look().test("name:context"): - node.with_context = next(self.stream).value == "with" - self.stream.skip() - return True - return False - - while True: - if node.names: - self.stream.expect("comma") - if self.stream.current.type == "name": - if parse_context(): - break - target = self.parse_assign_target(name_only=True) - if target.name.startswith("_"): - self.fail( - "names starting with an underline can not be imported", - target.lineno, - exc=TemplateAssertionError, - ) - if self.stream.skip_if("name:as"): - alias = self.parse_assign_target(name_only=True) - node.names.append((target.name, alias.name)) - else: - node.names.append(target.name) - if parse_context() or self.stream.current.type != "comma": - break - else: - self.stream.expect("name") - if not hasattr(node, "with_context"): - node.with_context = False - return node - - def parse_signature(self, node: _MacroCall) -> None: - args = node.args = [] - defaults = node.defaults = [] - self.stream.expect("lparen") - while self.stream.current.type != "rparen": - if args: - self.stream.expect("comma") - arg = self.parse_assign_target(name_only=True) - arg.set_ctx("param") - if self.stream.skip_if("assign"): - defaults.append(self.parse_expression()) - elif defaults: - self.fail("non-default argument follows default argument") - args.append(arg) - self.stream.expect("rparen") - - def parse_call_block(self) -> nodes.CallBlock: - node = nodes.CallBlock(lineno=next(self.stream).lineno) - if self.stream.current.type == "lparen": - self.parse_signature(node) - 
else: - node.args = [] - node.defaults = [] - - call_node = self.parse_expression() - if not isinstance(call_node, nodes.Call): - self.fail("expected call", node.lineno) - node.call = call_node - node.body = self.parse_statements(("name:endcall",), drop_needle=True) - return node - - def parse_filter_block(self) -> nodes.FilterBlock: - node = nodes.FilterBlock(lineno=next(self.stream).lineno) - node.filter = self.parse_filter(None, start_inline=True) # type: ignore - node.body = self.parse_statements(("name:endfilter",), drop_needle=True) - return node - - def parse_macro(self) -> nodes.Macro: - node = nodes.Macro(lineno=next(self.stream).lineno) - node.name = self.parse_assign_target(name_only=True).name - self.parse_signature(node) - node.body = self.parse_statements(("name:endmacro",), drop_needle=True) - return node - - def parse_print(self) -> nodes.Output: - node = nodes.Output(lineno=next(self.stream).lineno) - node.nodes = [] - while self.stream.current.type != "block_end": - if node.nodes: - self.stream.expect("comma") - node.nodes.append(self.parse_expression()) - return node - - @typing.overload - def parse_assign_target( - self, with_tuple: bool = ..., name_only: "te.Literal[True]" = ... - ) -> nodes.Name: - ... - - @typing.overload - def parse_assign_target( - self, - with_tuple: bool = True, - name_only: bool = False, - extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, - with_namespace: bool = False, - ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]: - ... - - def parse_assign_target( - self, - with_tuple: bool = True, - name_only: bool = False, - extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, - with_namespace: bool = False, - ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]: - """Parse an assignment target. As Jinja allows assignments to - tuples, this function can parse all allowed assignment targets. Per - default assignments to tuples are parsed, that can be disable however - by setting `with_tuple` to `False`. If only assignments to names are - wanted `name_only` can be set to `True`. The `extra_end_rules` - parameter is forwarded to the tuple parsing function. If - `with_namespace` is enabled, a namespace assignment may be parsed. - """ - target: nodes.Expr - - if with_namespace and self.stream.look().type == "dot": - token = self.stream.expect("name") - next(self.stream) # dot - attr = self.stream.expect("name") - target = nodes.NSRef(token.value, attr.value, lineno=token.lineno) - elif name_only: - token = self.stream.expect("name") - target = nodes.Name(token.value, "store", lineno=token.lineno) - else: - if with_tuple: - target = self.parse_tuple( - simplified=True, extra_end_rules=extra_end_rules - ) - else: - target = self.parse_primary() - - target.set_ctx("store") - - if not target.can_assign(): - self.fail( - f"can't assign to {type(target).__name__.lower()!r}", target.lineno - ) - - return target # type: ignore - - def parse_expression(self, with_condexpr: bool = True) -> nodes.Expr: - """Parse an expression. Per default all expressions are parsed, if - the optional `with_condexpr` parameter is set to `False` conditional - expressions are not parsed. 
- """ - if with_condexpr: - return self.parse_condexpr() - return self.parse_or() - - def parse_condexpr(self) -> nodes.Expr: - lineno = self.stream.current.lineno - expr1 = self.parse_or() - expr3: t.Optional[nodes.Expr] - - while self.stream.skip_if("name:if"): - expr2 = self.parse_or() - if self.stream.skip_if("name:else"): - expr3 = self.parse_condexpr() - else: - expr3 = None - expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) - lineno = self.stream.current.lineno - return expr1 - - def parse_or(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_and() - while self.stream.skip_if("name:or"): - right = self.parse_and() - left = nodes.Or(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_and(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_not() - while self.stream.skip_if("name:and"): - right = self.parse_not() - left = nodes.And(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_not(self) -> nodes.Expr: - if self.stream.current.test("name:not"): - lineno = next(self.stream).lineno - return nodes.Not(self.parse_not(), lineno=lineno) - return self.parse_compare() - - def parse_compare(self) -> nodes.Expr: - lineno = self.stream.current.lineno - expr = self.parse_math1() - ops = [] - while True: - token_type = self.stream.current.type - if token_type in _compare_operators: - next(self.stream) - ops.append(nodes.Operand(token_type, self.parse_math1())) - elif self.stream.skip_if("name:in"): - ops.append(nodes.Operand("in", self.parse_math1())) - elif self.stream.current.test("name:not") and self.stream.look().test( - "name:in" - ): - self.stream.skip(2) - ops.append(nodes.Operand("notin", self.parse_math1())) - else: - break - lineno = self.stream.current.lineno - if not ops: - return expr - return nodes.Compare(expr, ops, lineno=lineno) - - def parse_math1(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_concat() - while self.stream.current.type in ("add", "sub"): - cls = _math_nodes[self.stream.current.type] - next(self.stream) - right = self.parse_concat() - left = cls(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_concat(self) -> nodes.Expr: - lineno = self.stream.current.lineno - args = [self.parse_math2()] - while self.stream.current.type == "tilde": - next(self.stream) - args.append(self.parse_math2()) - if len(args) == 1: - return args[0] - return nodes.Concat(args, lineno=lineno) - - def parse_math2(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_pow() - while self.stream.current.type in ("mul", "div", "floordiv", "mod"): - cls = _math_nodes[self.stream.current.type] - next(self.stream) - right = self.parse_pow() - left = cls(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_pow(self) -> nodes.Expr: - lineno = self.stream.current.lineno - left = self.parse_unary() - while self.stream.current.type == "pow": - next(self.stream) - right = self.parse_unary() - left = nodes.Pow(left, right, lineno=lineno) - lineno = self.stream.current.lineno - return left - - def parse_unary(self, with_filter: bool = True) -> nodes.Expr: - token_type = self.stream.current.type - lineno = self.stream.current.lineno - node: nodes.Expr - - if token_type == "sub": - next(self.stream) - node = nodes.Neg(self.parse_unary(False), lineno=lineno) - elif token_type == "add": - next(self.stream) - node = 
nodes.Pos(self.parse_unary(False), lineno=lineno) - else: - node = self.parse_primary() - node = self.parse_postfix(node) - if with_filter: - node = self.parse_filter_expr(node) - return node - - def parse_primary(self) -> nodes.Expr: - token = self.stream.current - node: nodes.Expr - if token.type == "name": - if token.value in ("true", "false", "True", "False"): - node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno) - elif token.value in ("none", "None"): - node = nodes.Const(None, lineno=token.lineno) - else: - node = nodes.Name(token.value, "load", lineno=token.lineno) - next(self.stream) - elif token.type == "string": - next(self.stream) - buf = [token.value] - lineno = token.lineno - while self.stream.current.type == "string": - buf.append(self.stream.current.value) - next(self.stream) - node = nodes.Const("".join(buf), lineno=lineno) - elif token.type in ("integer", "float"): - next(self.stream) - node = nodes.Const(token.value, lineno=token.lineno) - elif token.type == "lparen": - next(self.stream) - node = self.parse_tuple(explicit_parentheses=True) - self.stream.expect("rparen") - elif token.type == "lbracket": - node = self.parse_list() - elif token.type == "lbrace": - node = self.parse_dict() - else: - self.fail(f"unexpected {describe_token(token)!r}", token.lineno) - return node - - def parse_tuple( - self, - simplified: bool = False, - with_condexpr: bool = True, - extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, - explicit_parentheses: bool = False, - ) -> t.Union[nodes.Tuple, nodes.Expr]: - """Works like `parse_expression` but if multiple expressions are - delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. - This method could also return a regular expression instead of a tuple - if no commas where found. - - The default parsing mode is a full tuple. If `simplified` is `True` - only names and literals are parsed. The `no_condexpr` parameter is - forwarded to :meth:`parse_expression`. - - Because tuples do not require delimiters and may end in a bogus comma - an extra hint is needed that marks the end of a tuple. For example - for loops support tuples between `for` and `in`. In that case the - `extra_end_rules` is set to ``['name:in']``. - - `explicit_parentheses` is true if the parsing was triggered by an - expression in parentheses. This is used to figure out if an empty - tuple is a valid expression or not. - """ - lineno = self.stream.current.lineno - if simplified: - parse = self.parse_primary - elif with_condexpr: - parse = self.parse_expression - else: - - def parse() -> nodes.Expr: - return self.parse_expression(with_condexpr=False) - - args: t.List[nodes.Expr] = [] - is_tuple = False - - while True: - if args: - self.stream.expect("comma") - if self.is_tuple_end(extra_end_rules): - break - args.append(parse()) - if self.stream.current.type == "comma": - is_tuple = True - else: - break - lineno = self.stream.current.lineno - - if not is_tuple: - if args: - return args[0] - - # if we don't have explicit parentheses, an empty tuple is - # not a valid expression. This would mean nothing (literally - # nothing) in the spot of an expression would be an empty - # tuple. 
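# Hedged sketch (standard public API, not part of this module): parse_tuple and
# parse_primary surface to users through Environment.parse(), which runs
# Parser.parse() and returns the node tree built here.
from jinja2 import Environment

ast = Environment().parse("{% set a, b = 1, 2 %}{{ a + b }}")
# repr(ast) shows a Template whose body holds an Assign with a Tuple target
# and an Output wrapping an Add node.
print(ast)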
- if not explicit_parentheses: - self.fail( - "Expected an expression," - f" got {describe_token(self.stream.current)!r}" - ) - - return nodes.Tuple(args, "load", lineno=lineno) - - def parse_list(self) -> nodes.List: - token = self.stream.expect("lbracket") - items: t.List[nodes.Expr] = [] - while self.stream.current.type != "rbracket": - if items: - self.stream.expect("comma") - if self.stream.current.type == "rbracket": - break - items.append(self.parse_expression()) - self.stream.expect("rbracket") - return nodes.List(items, lineno=token.lineno) - - def parse_dict(self) -> nodes.Dict: - token = self.stream.expect("lbrace") - items: t.List[nodes.Pair] = [] - while self.stream.current.type != "rbrace": - if items: - self.stream.expect("comma") - if self.stream.current.type == "rbrace": - break - key = self.parse_expression() - self.stream.expect("colon") - value = self.parse_expression() - items.append(nodes.Pair(key, value, lineno=key.lineno)) - self.stream.expect("rbrace") - return nodes.Dict(items, lineno=token.lineno) - - def parse_postfix(self, node: nodes.Expr) -> nodes.Expr: - while True: - token_type = self.stream.current.type - if token_type == "dot" or token_type == "lbracket": - node = self.parse_subscript(node) - # calls are valid both after postfix expressions (getattr - # and getitem) as well as filters and tests - elif token_type == "lparen": - node = self.parse_call(node) - else: - break - return node - - def parse_filter_expr(self, node: nodes.Expr) -> nodes.Expr: - while True: - token_type = self.stream.current.type - if token_type == "pipe": - node = self.parse_filter(node) # type: ignore - elif token_type == "name" and self.stream.current.value == "is": - node = self.parse_test(node) - # calls are valid both after postfix expressions (getattr - # and getitem) as well as filters and tests - elif token_type == "lparen": - node = self.parse_call(node) - else: - break - return node - - def parse_subscript( - self, node: nodes.Expr - ) -> t.Union[nodes.Getattr, nodes.Getitem]: - token = next(self.stream) - arg: nodes.Expr - - if token.type == "dot": - attr_token = self.stream.current - next(self.stream) - if attr_token.type == "name": - return nodes.Getattr( - node, attr_token.value, "load", lineno=token.lineno - ) - elif attr_token.type != "integer": - self.fail("expected name or number", attr_token.lineno) - arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) - return nodes.Getitem(node, arg, "load", lineno=token.lineno) - if token.type == "lbracket": - args: t.List[nodes.Expr] = [] - while self.stream.current.type != "rbracket": - if args: - self.stream.expect("comma") - args.append(self.parse_subscribed()) - self.stream.expect("rbracket") - if len(args) == 1: - arg = args[0] - else: - arg = nodes.Tuple(args, "load", lineno=token.lineno) - return nodes.Getitem(node, arg, "load", lineno=token.lineno) - self.fail("expected subscript expression", token.lineno) - - def parse_subscribed(self) -> nodes.Expr: - lineno = self.stream.current.lineno - args: t.List[t.Optional[nodes.Expr]] - - if self.stream.current.type == "colon": - next(self.stream) - args = [None] - else: - node = self.parse_expression() - if self.stream.current.type != "colon": - return node - next(self.stream) - args = [node] - - if self.stream.current.type == "colon": - args.append(None) - elif self.stream.current.type not in ("rbracket", "comma"): - args.append(self.parse_expression()) - else: - args.append(None) - - if self.stream.current.type == "colon": - next(self.stream) - if 
self.stream.current.type not in ("rbracket", "comma"): - args.append(self.parse_expression()) - else: - args.append(None) - else: - args.append(None) - - return nodes.Slice(lineno=lineno, *args) - - def parse_call_args(self) -> t.Tuple: - token = self.stream.expect("lparen") - args = [] - kwargs = [] - dyn_args = None - dyn_kwargs = None - require_comma = False - - def ensure(expr: bool) -> None: - if not expr: - self.fail("invalid syntax for function call expression", token.lineno) - - while self.stream.current.type != "rparen": - if require_comma: - self.stream.expect("comma") - - # support for trailing comma - if self.stream.current.type == "rparen": - break - - if self.stream.current.type == "mul": - ensure(dyn_args is None and dyn_kwargs is None) - next(self.stream) - dyn_args = self.parse_expression() - elif self.stream.current.type == "pow": - ensure(dyn_kwargs is None) - next(self.stream) - dyn_kwargs = self.parse_expression() - else: - if ( - self.stream.current.type == "name" - and self.stream.look().type == "assign" - ): - # Parsing a kwarg - ensure(dyn_kwargs is None) - key = self.stream.current.value - self.stream.skip(2) - value = self.parse_expression() - kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) - else: - # Parsing an arg - ensure(dyn_args is None and dyn_kwargs is None and not kwargs) - args.append(self.parse_expression()) - - require_comma = True - - self.stream.expect("rparen") - return args, kwargs, dyn_args, dyn_kwargs - - def parse_call(self, node: nodes.Expr) -> nodes.Call: - # The lparen will be expected in parse_call_args, but the lineno - # needs to be recorded before the stream is advanced. - token = self.stream.current - args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args() - return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) - - def parse_filter( - self, node: t.Optional[nodes.Expr], start_inline: bool = False - ) -> t.Optional[nodes.Expr]: - while self.stream.current.type == "pipe" or start_inline: - if not start_inline: - next(self.stream) - token = self.stream.expect("name") - name = token.value - while self.stream.current.type == "dot": - next(self.stream) - name += "." + self.stream.expect("name").value - if self.stream.current.type == "lparen": - args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args() - else: - args = [] - kwargs = [] - dyn_args = dyn_kwargs = None - node = nodes.Filter( - node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno - ) - start_inline = False - return node - - def parse_test(self, node: nodes.Expr) -> nodes.Expr: - token = next(self.stream) - if self.stream.current.test("name:not"): - next(self.stream) - negated = True - else: - negated = False - name = self.stream.expect("name").value - while self.stream.current.type == "dot": - next(self.stream) - name += "." 
+ self.stream.expect("name").value - dyn_args = dyn_kwargs = None - kwargs = [] - if self.stream.current.type == "lparen": - args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args() - elif self.stream.current.type in { - "name", - "string", - "integer", - "float", - "lparen", - "lbracket", - "lbrace", - } and not self.stream.current.test_any("name:else", "name:or", "name:and"): - if self.stream.current.test("name:is"): - self.fail("You cannot chain multiple tests with is") - arg_node = self.parse_primary() - arg_node = self.parse_postfix(arg_node) - args = [arg_node] - else: - args = [] - node = nodes.Test( - node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno - ) - if negated: - node = nodes.Not(node, lineno=token.lineno) - return node - - def subparse( - self, end_tokens: t.Optional[t.Tuple[str, ...]] = None - ) -> t.List[nodes.Node]: - body: t.List[nodes.Node] = [] - data_buffer: t.List[nodes.Node] = [] - add_data = data_buffer.append - - if end_tokens is not None: - self._end_token_stack.append(end_tokens) - - def flush_data() -> None: - if data_buffer: - lineno = data_buffer[0].lineno - body.append(nodes.Output(data_buffer[:], lineno=lineno)) - del data_buffer[:] - - try: - while self.stream: - token = self.stream.current - if token.type == "data": - if token.value: - add_data(nodes.TemplateData(token.value, lineno=token.lineno)) - next(self.stream) - elif token.type == "variable_begin": - next(self.stream) - add_data(self.parse_tuple(with_condexpr=True)) - self.stream.expect("variable_end") - elif token.type == "block_begin": - flush_data() - next(self.stream) - if end_tokens is not None and self.stream.current.test_any( - *end_tokens - ): - return body - rv = self.parse_statement() - if isinstance(rv, list): - body.extend(rv) - else: - body.append(rv) - self.stream.expect("block_end") - else: - raise AssertionError("internal parsing error") - - flush_data() - finally: - if end_tokens is not None: - self._end_token_stack.pop() - return body - - def parse(self) -> nodes.Template: - """Parse the whole template into a `Template` node.""" - result = nodes.Template(self.subparse(), lineno=1) - result.set_environment(self.environment) - return result diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_gtk3.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_gtk3.py deleted file mode 100644 index d6acd5547b855d89de91672b24f846e7597d3f81..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_gtk3.py +++ /dev/null @@ -1,587 +0,0 @@ -import functools -import logging -import os -from pathlib import Path - -import matplotlib as mpl -from matplotlib import _api, backend_tools, cbook -from matplotlib.backend_bases import ( - ToolContainerBase, CloseEvent, KeyEvent, LocationEvent, MouseEvent, - ResizeEvent) - -try: - import gi -except ImportError as err: - raise ImportError("The GTK3 backends require PyGObject") from err - -try: - # :raises ValueError: If module/version is already loaded, already - # required, or unavailable. - gi.require_version("Gtk", "3.0") -except ValueError as e: - # in this case we want to re-raise as ImportError so the - # auto-backend selection logic correctly skips. - raise ImportError(e) from e - -from gi.repository import Gio, GLib, GObject, Gtk, Gdk -from . 
import _backend_gtk -from ._backend_gtk import ( # noqa: F401 # pylint: disable=W0611 - _BackendGTK, _FigureCanvasGTK, _FigureManagerGTK, _NavigationToolbar2GTK, - TimerGTK as TimerGTK3, -) - - -_log = logging.getLogger(__name__) - - -@functools.cache -def _mpl_to_gtk_cursor(mpl_cursor): - return Gdk.Cursor.new_from_name( - Gdk.Display.get_default(), - _backend_gtk.mpl_to_gtk_cursor_name(mpl_cursor)) - - -class FigureCanvasGTK3(_FigureCanvasGTK, Gtk.DrawingArea): - required_interactive_framework = "gtk3" - manager_class = _api.classproperty(lambda cls: FigureManagerGTK3) - # Setting this as a static constant prevents - # this resulting expression from leaking - event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK - | Gdk.EventMask.BUTTON_RELEASE_MASK - | Gdk.EventMask.EXPOSURE_MASK - | Gdk.EventMask.KEY_PRESS_MASK - | Gdk.EventMask.KEY_RELEASE_MASK - | Gdk.EventMask.ENTER_NOTIFY_MASK - | Gdk.EventMask.LEAVE_NOTIFY_MASK - | Gdk.EventMask.POINTER_MOTION_MASK - | Gdk.EventMask.SCROLL_MASK) - - def __init__(self, figure=None): - super().__init__(figure=figure) - - self._idle_draw_id = 0 - self._rubberband_rect = None - - self.connect('scroll_event', self.scroll_event) - self.connect('button_press_event', self.button_press_event) - self.connect('button_release_event', self.button_release_event) - self.connect('configure_event', self.configure_event) - self.connect('screen-changed', self._update_device_pixel_ratio) - self.connect('notify::scale-factor', self._update_device_pixel_ratio) - self.connect('draw', self.on_draw_event) - self.connect('draw', self._post_draw) - self.connect('key_press_event', self.key_press_event) - self.connect('key_release_event', self.key_release_event) - self.connect('motion_notify_event', self.motion_notify_event) - self.connect('enter_notify_event', self.enter_notify_event) - self.connect('leave_notify_event', self.leave_notify_event) - self.connect('size_allocate', self.size_allocate) - - self.set_events(self.__class__.event_mask) - - self.set_can_focus(True) - - css = Gtk.CssProvider() - css.load_from_data(b".matplotlib-canvas { background-color: white; }") - style_ctx = self.get_style_context() - style_ctx.add_provider(css, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) - style_ctx.add_class("matplotlib-canvas") - - def destroy(self): - CloseEvent("close_event", self)._process() - - def set_cursor(self, cursor): - # docstring inherited - window = self.get_property("window") - if window is not None: - window.set_cursor(_mpl_to_gtk_cursor(cursor)) - context = GLib.MainContext.default() - context.iteration(True) - - def _mpl_coords(self, event=None): - """ - Convert the position of a GTK event, or of the current cursor position - if *event* is None, to Matplotlib coordinates. - - GTK use logical pixels, but the figure is scaled to physical pixels for - rendering. Transform to physical pixels so that all of the down-stream - transforms work as expected. - - Also, the origin is different and needs to be corrected. 
- """ - if event is None: - window = self.get_window() - t, x, y, state = window.get_device_position( - window.get_display().get_device_manager().get_client_pointer()) - else: - x, y = event.x, event.y - x = x * self.device_pixel_ratio - # flip y so y=0 is bottom of canvas - y = self.figure.bbox.height - y * self.device_pixel_ratio - return x, y - - def scroll_event(self, widget, event): - step = 1 if event.direction == Gdk.ScrollDirection.UP else -1 - MouseEvent("scroll_event", self, - *self._mpl_coords(event), step=step, - modifiers=self._mpl_modifiers(event.state), - guiEvent=event)._process() - return False # finish event propagation? - - def button_press_event(self, widget, event): - MouseEvent("button_press_event", self, - *self._mpl_coords(event), event.button, - modifiers=self._mpl_modifiers(event.state), - guiEvent=event)._process() - return False # finish event propagation? - - def button_release_event(self, widget, event): - MouseEvent("button_release_event", self, - *self._mpl_coords(event), event.button, - modifiers=self._mpl_modifiers(event.state), - guiEvent=event)._process() - return False # finish event propagation? - - def key_press_event(self, widget, event): - KeyEvent("key_press_event", self, - self._get_key(event), *self._mpl_coords(), - guiEvent=event)._process() - return True # stop event propagation - - def key_release_event(self, widget, event): - KeyEvent("key_release_event", self, - self._get_key(event), *self._mpl_coords(), - guiEvent=event)._process() - return True # stop event propagation - - def motion_notify_event(self, widget, event): - MouseEvent("motion_notify_event", self, *self._mpl_coords(event), - modifiers=self._mpl_modifiers(event.state), - guiEvent=event)._process() - return False # finish event propagation? - - def enter_notify_event(self, widget, event): - gtk_mods = Gdk.Keymap.get_for_display( - self.get_display()).get_modifier_state() - LocationEvent("figure_enter_event", self, *self._mpl_coords(event), - modifiers=self._mpl_modifiers(gtk_mods), - guiEvent=event)._process() - - def leave_notify_event(self, widget, event): - gtk_mods = Gdk.Keymap.get_for_display( - self.get_display()).get_modifier_state() - LocationEvent("figure_leave_event", self, *self._mpl_coords(event), - modifiers=self._mpl_modifiers(gtk_mods), - guiEvent=event)._process() - - def size_allocate(self, widget, allocation): - dpival = self.figure.dpi - winch = allocation.width * self.device_pixel_ratio / dpival - hinch = allocation.height * self.device_pixel_ratio / dpival - self.figure.set_size_inches(winch, hinch, forward=False) - ResizeEvent("resize_event", self)._process() - self.draw_idle() - - @staticmethod - def _mpl_modifiers(event_state, *, exclude=None): - modifiers = [ - ("ctrl", Gdk.ModifierType.CONTROL_MASK, "control"), - ("alt", Gdk.ModifierType.MOD1_MASK, "alt"), - ("shift", Gdk.ModifierType.SHIFT_MASK, "shift"), - ("super", Gdk.ModifierType.MOD4_MASK, "super"), - ] - return [name for name, mask, key in modifiers - if exclude != key and event_state & mask] - - def _get_key(self, event): - unikey = chr(Gdk.keyval_to_unicode(event.keyval)) - key = cbook._unikey_or_keysym_to_mplkey( - unikey, Gdk.keyval_name(event.keyval)) - mods = self._mpl_modifiers(event.state, exclude=key) - if "shift" in mods and unikey.isprintable(): - mods.remove("shift") - return "+".join([*mods, key]) - - def _update_device_pixel_ratio(self, *args, **kwargs): - # We need to be careful in cases with mixed resolution displays if - # device_pixel_ratio changes. 
- if self._set_device_pixel_ratio(self.get_scale_factor()): - # The easiest way to resize the canvas is to emit a resize event - # since we implement all the logic for resizing the canvas for that - # event. - self.queue_resize() - self.queue_draw() - - def configure_event(self, widget, event): - if widget.get_property("window") is None: - return - w = event.width * self.device_pixel_ratio - h = event.height * self.device_pixel_ratio - if w < 3 or h < 3: - return # empty fig - # resize the figure (in inches) - dpi = self.figure.dpi - self.figure.set_size_inches(w / dpi, h / dpi, forward=False) - return False # finish event propagation? - - def _draw_rubberband(self, rect): - self._rubberband_rect = rect - # TODO: Only update the rubberband area. - self.queue_draw() - - def _post_draw(self, widget, ctx): - if self._rubberband_rect is None: - return - - x0, y0, w, h = (dim / self.device_pixel_ratio - for dim in self._rubberband_rect) - x1 = x0 + w - y1 = y0 + h - - # Draw the lines from x0, y0 towards x1, y1 so that the - # dashes don't "jump" when moving the zoom box. - ctx.move_to(x0, y0) - ctx.line_to(x0, y1) - ctx.move_to(x0, y0) - ctx.line_to(x1, y0) - ctx.move_to(x0, y1) - ctx.line_to(x1, y1) - ctx.move_to(x1, y0) - ctx.line_to(x1, y1) - - ctx.set_antialias(1) - ctx.set_line_width(1) - ctx.set_dash((3, 3), 0) - ctx.set_source_rgb(0, 0, 0) - ctx.stroke_preserve() - - ctx.set_dash((3, 3), 3) - ctx.set_source_rgb(1, 1, 1) - ctx.stroke() - - def on_draw_event(self, widget, ctx): - # to be overwritten by GTK3Agg or GTK3Cairo - pass - - def draw(self): - # docstring inherited - if self.is_drawable(): - self.queue_draw() - - def draw_idle(self): - # docstring inherited - if self._idle_draw_id != 0: - return - def idle_draw(*args): - try: - self.draw() - finally: - self._idle_draw_id = 0 - return False - self._idle_draw_id = GLib.idle_add(idle_draw) - - def flush_events(self): - # docstring inherited - context = GLib.MainContext.default() - while context.pending(): - context.iteration(True) - - -class NavigationToolbar2GTK3(_NavigationToolbar2GTK, Gtk.Toolbar): - def __init__(self, canvas): - GObject.GObject.__init__(self) - - self.set_style(Gtk.ToolbarStyle.ICONS) - - self._gtk_ids = {} - for text, tooltip_text, image_file, callback in self.toolitems: - if text is None: - self.insert(Gtk.SeparatorToolItem(), -1) - continue - image = Gtk.Image.new_from_gicon( - Gio.Icon.new_for_string( - str(cbook._get_data_path('images', - f'{image_file}-symbolic.svg'))), - Gtk.IconSize.LARGE_TOOLBAR) - self._gtk_ids[text] = button = ( - Gtk.ToggleToolButton() if callback in ['zoom', 'pan'] else - Gtk.ToolButton()) - button.set_label(text) - button.set_icon_widget(image) - # Save the handler id, so that we can block it as needed. - button._signal_handler = button.connect( - 'clicked', getattr(self, callback)) - button.set_tooltip_text(tooltip_text) - self.insert(button, -1) - - # This filler item ensures the toolbar is always at least two text - # lines high. Otherwise the canvas gets redrawn as the mouse hovers - # over images because those use two-line messages which resize the - # toolbar. - toolitem = Gtk.ToolItem() - self.insert(toolitem, -1) - label = Gtk.Label() - label.set_markup( - '\N{NO-BREAK SPACE}\n\N{NO-BREAK SPACE}') - toolitem.set_expand(True) # Push real message to the right. 
- toolitem.add(label) - - toolitem = Gtk.ToolItem() - self.insert(toolitem, -1) - self.message = Gtk.Label() - self.message.set_justify(Gtk.Justification.RIGHT) - toolitem.add(self.message) - - self.show_all() - - _NavigationToolbar2GTK.__init__(self, canvas) - - def save_figure(self, *args): - dialog = Gtk.FileChooserDialog( - title="Save the figure", - parent=self.canvas.get_toplevel(), - action=Gtk.FileChooserAction.SAVE, - buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, - Gtk.STOCK_SAVE, Gtk.ResponseType.OK), - ) - for name, fmts \ - in self.canvas.get_supported_filetypes_grouped().items(): - ff = Gtk.FileFilter() - ff.set_name(name) - for fmt in fmts: - ff.add_pattern(f'*.{fmt}') - dialog.add_filter(ff) - if self.canvas.get_default_filetype() in fmts: - dialog.set_filter(ff) - - @functools.partial(dialog.connect, "notify::filter") - def on_notify_filter(*args): - name = dialog.get_filter().get_name() - fmt = self.canvas.get_supported_filetypes_grouped()[name][0] - dialog.set_current_name( - str(Path(dialog.get_current_name()).with_suffix(f'.{fmt}'))) - - dialog.set_current_folder(mpl.rcParams["savefig.directory"]) - dialog.set_current_name(self.canvas.get_default_filename()) - dialog.set_do_overwrite_confirmation(True) - - response = dialog.run() - fname = dialog.get_filename() - ff = dialog.get_filter() # Doesn't autoadjust to filename :/ - fmt = self.canvas.get_supported_filetypes_grouped()[ff.get_name()][0] - dialog.destroy() - if response != Gtk.ResponseType.OK: - return - # Save dir for next time, unless empty str (which means use cwd). - if mpl.rcParams['savefig.directory']: - mpl.rcParams['savefig.directory'] = os.path.dirname(fname) - try: - self.canvas.figure.savefig(fname, format=fmt) - except Exception as e: - dialog = Gtk.MessageDialog( - parent=self.canvas.get_toplevel(), message_format=str(e), - type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK) - dialog.run() - dialog.destroy() - - -class ToolbarGTK3(ToolContainerBase, Gtk.Box): - _icon_extension = '-symbolic.svg' - - def __init__(self, toolmanager): - ToolContainerBase.__init__(self, toolmanager) - Gtk.Box.__init__(self) - self.set_property('orientation', Gtk.Orientation.HORIZONTAL) - self._message = Gtk.Label() - self._message.set_justify(Gtk.Justification.RIGHT) - self.pack_end(self._message, False, False, 0) - self.show_all() - self._groups = {} - self._toolitems = {} - - def add_toolitem(self, name, group, position, image_file, description, - toggle): - if toggle: - button = Gtk.ToggleToolButton() - else: - button = Gtk.ToolButton() - button.set_label(name) - - if image_file is not None: - image = Gtk.Image.new_from_gicon( - Gio.Icon.new_for_string(image_file), - Gtk.IconSize.LARGE_TOOLBAR) - button.set_icon_widget(image) - - if position is None: - position = -1 - - self._add_button(button, group, position) - signal = button.connect('clicked', self._call_tool, name) - button.set_tooltip_text(description) - button.show_all() - self._toolitems.setdefault(name, []) - self._toolitems[name].append((button, signal)) - - def _add_button(self, button, group, position): - if group not in self._groups: - if self._groups: - self._add_separator() - toolbar = Gtk.Toolbar() - toolbar.set_style(Gtk.ToolbarStyle.ICONS) - self.pack_start(toolbar, False, False, 0) - toolbar.show_all() - self._groups[group] = toolbar - self._groups[group].insert(button, position) - - def _call_tool(self, btn, name): - self.trigger_tool(name) - - def toggle_toolitem(self, name, toggled): - if name not in self._toolitems: - return - for 
toolitem, signal in self._toolitems[name]: - toolitem.handler_block(signal) - toolitem.set_active(toggled) - toolitem.handler_unblock(signal) - - def remove_toolitem(self, name): - if name not in self._toolitems: - self.toolmanager.message_event(f'{name} not in toolbar', self) - return - - for group in self._groups: - for toolitem, _signal in self._toolitems[name]: - if toolitem in self._groups[group]: - self._groups[group].remove(toolitem) - del self._toolitems[name] - - def _add_separator(self): - sep = Gtk.Separator() - sep.set_property("orientation", Gtk.Orientation.VERTICAL) - self.pack_start(sep, False, True, 0) - sep.show_all() - - def set_message(self, s): - self._message.set_label(s) - - -@backend_tools._register_tool_class(FigureCanvasGTK3) -class SaveFigureGTK3(backend_tools.SaveFigureBase): - def trigger(self, *args, **kwargs): - NavigationToolbar2GTK3.save_figure( - self._make_classic_style_pseudo_toolbar()) - - -@backend_tools._register_tool_class(FigureCanvasGTK3) -class HelpGTK3(backend_tools.ToolHelpBase): - def _normalize_shortcut(self, key): - """ - Convert Matplotlib key presses to GTK+ accelerator identifiers. - - Related to `FigureCanvasGTK3._get_key`. - """ - special = { - 'backspace': 'BackSpace', - 'pagedown': 'Page_Down', - 'pageup': 'Page_Up', - 'scroll_lock': 'Scroll_Lock', - } - - parts = key.split('+') - mods = ['<' + mod + '>' for mod in parts[:-1]] - key = parts[-1] - - if key in special: - key = special[key] - elif len(key) > 1: - key = key.capitalize() - elif key.isupper(): - mods += [''] - - return ''.join(mods) + key - - def _is_valid_shortcut(self, key): - """ - Check for a valid shortcut to be displayed. - - - GTK will never send 'cmd+' (see `FigureCanvasGTK3._get_key`). - - The shortcut window only shows keyboard shortcuts, not mouse buttons. - """ - return 'cmd+' not in key and not key.startswith('MouseButton.') - - def _show_shortcuts_window(self): - section = Gtk.ShortcutsSection() - - for name, tool in sorted(self.toolmanager.tools.items()): - if not tool.description: - continue - - # Putting everything in a separate group allows GTK to - # automatically split them into separate columns/pages, which is - # useful because we have lots of shortcuts, some with many keys - # that are very wide. - group = Gtk.ShortcutsGroup() - section.add(group) - # A hack to remove the title since we have no group naming. - group.forall(lambda widget, data: widget.set_visible(False), None) - - shortcut = Gtk.ShortcutsShortcut( - accelerator=' '.join( - self._normalize_shortcut(key) - for key in self.toolmanager.get_tool_keymap(name) - if self._is_valid_shortcut(key)), - title=tool.name, - subtitle=tool.description) - group.add(shortcut) - - window = Gtk.ShortcutsWindow( - title='Help', - modal=True, - transient_for=self._figure.canvas.get_toplevel()) - section.show() # Must be done explicitly before add! 
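toggle_toolitem above relies on GTK's handler-block pattern so that programmatically syncing a toggle button's state does not re-trigger the tool it controls. A self-contained sketch of the same pattern, assuming PyGObject and GTK 3 are available (the button and callback are illustrative):

```python
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

def on_toggled(btn):
    print("toggled ->", btn.get_active())

button = Gtk.ToggleButton(label="pan")
handler_id = button.connect("toggled", on_toggled)

# Silence the handler while changing state programmatically,
# mirroring ToolbarGTK3.toggle_toolitem above.
button.handler_block(handler_id)
button.set_active(True)       # on_toggled is NOT called
button.handler_unblock(handler_id)

button.set_active(False)      # on_toggled IS called
```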
- window.add(section) - - window.show_all() - - def _show_shortcuts_dialog(self): - dialog = Gtk.MessageDialog( - self._figure.canvas.get_toplevel(), - 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, self._get_help_text(), - title="Help") - dialog.run() - dialog.destroy() - - def trigger(self, *args): - if Gtk.check_version(3, 20, 0) is None: - self._show_shortcuts_window() - else: - self._show_shortcuts_dialog() - - -@backend_tools._register_tool_class(FigureCanvasGTK3) -class ToolCopyToClipboardGTK3(backend_tools.ToolCopyToClipboardBase): - def trigger(self, *args, **kwargs): - clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) - window = self.canvas.get_window() - x, y, width, height = window.get_geometry() - pb = Gdk.pixbuf_get_from_window(window, x, y, width, height) - clipboard.set_image(pb) - - -Toolbar = ToolbarGTK3 -backend_tools._register_tool_class( - FigureCanvasGTK3, _backend_gtk.ConfigureSubplotsGTK) -backend_tools._register_tool_class( - FigureCanvasGTK3, _backend_gtk.RubberbandGTK) - - -class FigureManagerGTK3(_FigureManagerGTK): - _toolbar2_class = NavigationToolbar2GTK3 - _toolmanager_toolbar_class = ToolbarGTK3 - - -@_BackendGTK.export -class _BackendGTK3(_BackendGTK): - FigureCanvas = FigureCanvasGTK3 - FigureManager = FigureManagerGTK3 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_gtk4cairo.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_gtk4cairo.py deleted file mode 100644 index d57f53fb28d6c74a3e327320230d60dc85c1ae17..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_gtk4cairo.py +++ /dev/null @@ -1,28 +0,0 @@ -from contextlib import nullcontext - -from .backend_cairo import FigureCanvasCairo -from .backend_gtk4 import Gtk, FigureCanvasGTK4, _BackendGTK4 - - -class FigureCanvasGTK4Cairo(FigureCanvasCairo, FigureCanvasGTK4): - _context_is_scaled = True - - def on_draw_event(self, widget, ctx): - with (self.toolbar._wait_cursor_for_draw_cm() if self.toolbar - else nullcontext()): - self._renderer.set_context(ctx) - scale = self.device_pixel_ratio - # Scale physical drawing to logical size. 
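The canvases, toolbars, and figure managers defined above are normally reached by selecting a GTK backend before pyplot is used; a minimal sketch (any of the GTK backend names works, provided PyGObject is installed):

```python
import matplotlib
matplotlib.use("GTK3Agg")        # or "GTK3Cairo" / "GTK4Cairo"

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 4, 9])
plt.show()                       # window managed by FigureManagerGTK3
```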
- ctx.scale(1 / scale, 1 / scale) - allocation = self.get_allocation() - Gtk.render_background( - self.get_style_context(), ctx, - allocation.x, allocation.y, - allocation.width, allocation.height) - self._renderer.dpi = self.figure.dpi - self.figure.draw(self._renderer) - - -@_BackendGTK4.export -class _BackendGTK4Cairo(_BackendGTK4): - FigureCanvas = FigureCanvasGTK4Cairo diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_svg.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_svg.py deleted file mode 100644 index f62152ed1f0acf233d70796748ed9f4594cc1203..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_svg.py +++ /dev/null @@ -1,1368 +0,0 @@ -import base64 -import codecs -import datetime -import gzip -import hashlib -from io import BytesIO -import itertools -import logging -import os -import re -import uuid - -import numpy as np -from PIL import Image - -import matplotlib as mpl -from matplotlib import cbook, font_manager as fm -from matplotlib.backend_bases import ( - _Backend, FigureCanvasBase, FigureManagerBase, RendererBase) -from matplotlib.backends.backend_mixed import MixedModeRenderer -from matplotlib.colors import rgb2hex -from matplotlib.dates import UTC -from matplotlib.path import Path -from matplotlib import _path -from matplotlib.transforms import Affine2D, Affine2DBase - - -_log = logging.getLogger(__name__) - - -# ---------------------------------------------------------------------- -# SimpleXMLWriter class -# -# Based on an original by Fredrik Lundh, but modified here to: -# 1. Support modern Python idioms -# 2. Remove encoding support (it's handled by the file writer instead) -# 3. Support proper indentation -# 4. Minify things a little bit - -# -------------------------------------------------------------------- -# The SimpleXMLWriter module is -# -# Copyright (c) 2001-2004 by Fredrik Lundh -# -# By obtaining, using, and/or copying this software and/or its -# associated documentation, you agree that you have read, understood, -# and will comply with the following terms and conditions: -# -# Permission to use, copy, modify, and distribute this software and -# its associated documentation for any purpose and without fee is -# hereby granted, provided that the above copyright notice appears in -# all copies, and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Secret Labs AB or the author not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD -# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- -# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR -# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. 
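Everything below is reached from user code through savefig, which dispatches to FigureCanvasSVG.print_svg / print_svgz further down; a minimal sketch, including the SVG-specific metadata keyword:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label="y = x**2")
ax.legend()

fig.savefig("figure.svg")                                     # plain SVG
fig.savefig("figure.svgz")                                    # gzip-compressed SVG
fig.savefig("figure.svg",
            metadata={"Title": "Example", "Creator": "me"})   # Dublin Core metadata
```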
-# -------------------------------------------------------------------- - - -def _escape_cdata(s): - s = s.replace("&", "&") - s = s.replace("<", "<") - s = s.replace(">", ">") - return s - - -_escape_xml_comment = re.compile(r'-(?=-)') - - -def _escape_comment(s): - s = _escape_cdata(s) - return _escape_xml_comment.sub('- ', s) - - -def _escape_attrib(s): - s = s.replace("&", "&") - s = s.replace("'", "'") - s = s.replace('"', """) - s = s.replace("<", "<") - s = s.replace(">", ">") - return s - - -def _quote_escape_attrib(s): - return ('"' + _escape_cdata(s) + '"' if '"' not in s else - "'" + _escape_cdata(s) + "'" if "'" not in s else - '"' + _escape_attrib(s) + '"') - - -def _short_float_fmt(x): - """ - Create a short string representation of a float, which is %f - formatting with trailing zeros and the decimal point removed. - """ - return f'{x:f}'.rstrip('0').rstrip('.') - - -class XMLWriter: - """ - Parameters - ---------- - file : writable text file-like object - """ - - def __init__(self, file): - self.__write = file.write - if hasattr(file, "flush"): - self.flush = file.flush - self.__open = 0 # true if start tag is open - self.__tags = [] - self.__data = [] - self.__indentation = " " * 64 - - def __flush(self, indent=True): - # flush internal buffers - if self.__open: - if indent: - self.__write(">\n") - else: - self.__write(">") - self.__open = 0 - if self.__data: - data = ''.join(self.__data) - self.__write(_escape_cdata(data)) - self.__data = [] - - def start(self, tag, attrib={}, **extra): - """ - Open a new element. Attributes can be given as keyword - arguments, or as a string/string dictionary. The method returns - an opaque identifier that can be passed to the :meth:`close` - method, to close all open elements up to and including this one. - - Parameters - ---------- - tag - Element tag. - attrib - Attribute dictionary. Alternatively, attributes can be given as - keyword arguments. - - Returns - ------- - An element identifier. - """ - self.__flush() - tag = _escape_cdata(tag) - self.__data = [] - self.__tags.append(tag) - self.__write(self.__indentation[:len(self.__tags) - 1]) - self.__write(f"<{tag}") - for k, v in {**attrib, **extra}.items(): - if v: - k = _escape_cdata(k) - v = _quote_escape_attrib(v) - self.__write(f' {k}={v}') - self.__open = 1 - return len(self.__tags) - 1 - - def comment(self, comment): - """ - Add a comment to the output stream. - - Parameters - ---------- - comment : str - Comment text. - """ - self.__flush() - self.__write(self.__indentation[:len(self.__tags)]) - self.__write(f"\n") - - def data(self, text): - """ - Add character data to the output stream. - - Parameters - ---------- - text : str - Character data. - """ - self.__data.append(text) - - def end(self, tag=None, indent=True): - """ - Close the current element (opened by the most recent call to - :meth:`start`). - - Parameters - ---------- - tag - Element tag. If given, the tag must match the start tag. If - omitted, the current element is closed. 
- indent : bool, default: True - """ - if tag: - assert self.__tags, f"unbalanced end({tag})" - assert _escape_cdata(tag) == self.__tags[-1], \ - f"expected end({self.__tags[-1]}), got {tag}" - else: - assert self.__tags, "unbalanced end()" - tag = self.__tags.pop() - if self.__data: - self.__flush(indent) - elif self.__open: - self.__open = 0 - self.__write("/>\n") - return - if indent: - self.__write(self.__indentation[:len(self.__tags)]) - self.__write(f"\n") - - def close(self, id): - """ - Close open elements, up to (and including) the element identified - by the given identifier. - - Parameters - ---------- - id - Element identifier, as returned by the :meth:`start` method. - """ - while len(self.__tags) > id: - self.end() - - def element(self, tag, text=None, attrib={}, **extra): - """ - Add an entire element. This is the same as calling :meth:`start`, - :meth:`data`, and :meth:`end` in sequence. The *text* argument can be - omitted. - """ - self.start(tag, attrib, **extra) - if text: - self.data(text) - self.end(indent=False) - - def flush(self): - """Flush the output stream.""" - pass # replaced by the constructor - - -def _generate_transform(transform_list): - parts = [] - for type, value in transform_list: - if (type == 'scale' and (value == (1,) or value == (1, 1)) - or type == 'translate' and value == (0, 0) - or type == 'rotate' and value == (0,)): - continue - if type == 'matrix' and isinstance(value, Affine2DBase): - value = value.to_values() - parts.append('{}({})'.format( - type, ' '.join(_short_float_fmt(x) for x in value))) - return ' '.join(parts) - - -def _generate_css(attrib): - return "; ".join(f"{k}: {v}" for k, v in attrib.items()) - - -_capstyle_d = {'projecting': 'square', 'butt': 'butt', 'round': 'round'} - - -def _check_is_str(info, key): - if not isinstance(info, str): - raise TypeError(f'Invalid type for {key} metadata. Expected str, not ' - f'{type(info)}.') - - -def _check_is_iterable_of_str(infos, key): - if np.iterable(infos): - for info in infos: - if not isinstance(info, str): - raise TypeError(f'Invalid type for {key} metadata. Expected ' - f'iterable of str, not {type(info)}.') - else: - raise TypeError(f'Invalid type for {key} metadata. 
Expected str or ' - f'iterable of str, not {type(infos)}.') - - -class RendererSVG(RendererBase): - def __init__(self, width, height, svgwriter, basename=None, image_dpi=72, - *, metadata=None): - self.width = width - self.height = height - self.writer = XMLWriter(svgwriter) - self.image_dpi = image_dpi # actual dpi at which we rasterize stuff - - if basename is None: - basename = getattr(svgwriter, "name", "") - if not isinstance(basename, str): - basename = "" - self.basename = basename - - self._groupd = {} - self._image_counter = itertools.count() - self._clipd = {} - self._markers = {} - self._path_collection_id = 0 - self._hatchd = {} - self._has_gouraud = False - self._n_gradients = 0 - - super().__init__() - self._glyph_map = dict() - str_height = _short_float_fmt(height) - str_width = _short_float_fmt(width) - svgwriter.write(svgProlog) - self._start_id = self.writer.start( - 'svg', - width=f'{str_width}pt', - height=f'{str_height}pt', - viewBox=f'0 0 {str_width} {str_height}', - xmlns="http://www.w3.org/2000/svg", - version="1.1", - attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"}) - self._write_metadata(metadata) - self._write_default_style() - - def finalize(self): - self._write_clips() - self._write_hatches() - self.writer.close(self._start_id) - self.writer.flush() - - def _write_metadata(self, metadata): - # Add metadata following the Dublin Core Metadata Initiative, and the - # Creative Commons Rights Expression Language. This is mainly for - # compatibility with Inkscape. - if metadata is None: - metadata = {} - metadata = { - 'Format': 'image/svg+xml', - 'Type': 'http://purl.org/dc/dcmitype/StillImage', - 'Creator': - f'Matplotlib v{mpl.__version__}, https://matplotlib.org/', - **metadata - } - writer = self.writer - - if 'Title' in metadata: - title = metadata['Title'] - _check_is_str(title, 'Title') - writer.element('title', text=title) - - # Special handling. - date = metadata.get('Date', None) - if date is not None: - if isinstance(date, str): - dates = [date] - elif isinstance(date, (datetime.datetime, datetime.date)): - dates = [date.isoformat()] - elif np.iterable(date): - dates = [] - for d in date: - if isinstance(d, str): - dates.append(d) - elif isinstance(d, (datetime.datetime, datetime.date)): - dates.append(d.isoformat()) - else: - raise TypeError( - f'Invalid type for Date metadata. ' - f'Expected iterable of str, date, or datetime, ' - f'not {type(d)}.') - else: - raise TypeError(f'Invalid type for Date metadata. ' - f'Expected str, date, datetime, or iterable ' - f'of the same, not {type(date)}.') - metadata['Date'] = '/'.join(dates) - elif 'Date' not in metadata: - # Do not add `Date` if the user explicitly set `Date` to `None` - # Get source date from SOURCE_DATE_EPOCH, if set. 
- # See https://reproducible-builds.org/specs/source-date-epoch/ - date = os.getenv("SOURCE_DATE_EPOCH") - if date: - date = datetime.datetime.fromtimestamp(int(date), datetime.timezone.utc) - metadata['Date'] = date.replace(tzinfo=UTC).isoformat() - else: - metadata['Date'] = datetime.datetime.today().isoformat() - - mid = None - def ensure_metadata(mid): - if mid is not None: - return mid - mid = writer.start('metadata') - writer.start('rdf:RDF', attrib={ - 'xmlns:dc': "http://purl.org/dc/elements/1.1/", - 'xmlns:cc': "http://creativecommons.org/ns#", - 'xmlns:rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#", - }) - writer.start('cc:Work') - return mid - - uri = metadata.pop('Type', None) - if uri is not None: - mid = ensure_metadata(mid) - writer.element('dc:type', attrib={'rdf:resource': uri}) - - # Single value only. - for key in ['Title', 'Coverage', 'Date', 'Description', 'Format', - 'Identifier', 'Language', 'Relation', 'Source']: - info = metadata.pop(key, None) - if info is not None: - mid = ensure_metadata(mid) - _check_is_str(info, key) - writer.element(f'dc:{key.lower()}', text=info) - - # Multiple Agent values. - for key in ['Creator', 'Contributor', 'Publisher', 'Rights']: - agents = metadata.pop(key, None) - if agents is None: - continue - - if isinstance(agents, str): - agents = [agents] - - _check_is_iterable_of_str(agents, key) - # Now we know that we have an iterable of str - mid = ensure_metadata(mid) - writer.start(f'dc:{key.lower()}') - for agent in agents: - writer.start('cc:Agent') - writer.element('dc:title', text=agent) - writer.end('cc:Agent') - writer.end(f'dc:{key.lower()}') - - # Multiple values. - keywords = metadata.pop('Keywords', None) - if keywords is not None: - if isinstance(keywords, str): - keywords = [keywords] - _check_is_iterable_of_str(keywords, 'Keywords') - # Now we know that we have an iterable of str - mid = ensure_metadata(mid) - writer.start('dc:subject') - writer.start('rdf:Bag') - for keyword in keywords: - writer.element('rdf:li', text=keyword) - writer.end('rdf:Bag') - writer.end('dc:subject') - - if mid is not None: - writer.close(mid) - - if metadata: - raise ValueError('Unknown metadata key(s) passed to SVG writer: ' + - ','.join(metadata)) - - def _write_default_style(self): - writer = self.writer - default_style = _generate_css({ - 'stroke-linejoin': 'round', - 'stroke-linecap': 'butt'}) - writer.start('defs') - writer.element('style', type='text/css', text='*{%s}' % default_style) - writer.end('defs') - - def _make_id(self, type, content): - salt = mpl.rcParams['svg.hashsalt'] - if salt is None: - salt = str(uuid.uuid4()) - m = hashlib.sha256() - m.update(salt.encode('utf8')) - m.update(str(content).encode('utf8')) - return f'{type}{m.hexdigest()[:10]}' - - def _make_flip_transform(self, transform): - return transform + Affine2D().scale(1, -1).translate(0, self.height) - - def _get_hatch(self, gc, rgbFace): - """ - Create a new hatch pattern - """ - if rgbFace is not None: - rgbFace = tuple(rgbFace) - edge = gc.get_hatch_color() - if edge is not None: - edge = tuple(edge) - dictkey = (gc.get_hatch(), rgbFace, edge) - oid = self._hatchd.get(dictkey) - if oid is None: - oid = self._make_id('h', dictkey) - self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid) - else: - _, oid = oid - return oid - - def _write_hatches(self): - if not len(self._hatchd): - return - HATCH_SIZE = 72 - writer = self.writer - writer.start('defs') - for (path, face, stroke), oid in self._hatchd.values(): - writer.start( - 'pattern', - 
id=oid, - patternUnits="userSpaceOnUse", - x="0", y="0", width=str(HATCH_SIZE), - height=str(HATCH_SIZE)) - path_data = self._convert_path( - path, - Affine2D() - .scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE), - simplify=False) - if face is None: - fill = 'none' - else: - fill = rgb2hex(face) - writer.element( - 'rect', - x="0", y="0", width=str(HATCH_SIZE+1), - height=str(HATCH_SIZE+1), - fill=fill) - hatch_style = { - 'fill': rgb2hex(stroke), - 'stroke': rgb2hex(stroke), - 'stroke-width': str(mpl.rcParams['hatch.linewidth']), - 'stroke-linecap': 'butt', - 'stroke-linejoin': 'miter' - } - if stroke[3] < 1: - hatch_style['stroke-opacity'] = str(stroke[3]) - writer.element( - 'path', - d=path_data, - style=_generate_css(hatch_style) - ) - writer.end('pattern') - writer.end('defs') - - def _get_style_dict(self, gc, rgbFace): - """Generate a style string from the GraphicsContext and rgbFace.""" - attrib = {} - - forced_alpha = gc.get_forced_alpha() - - if gc.get_hatch() is not None: - attrib['fill'] = f"url(#{self._get_hatch(gc, rgbFace)})" - if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 - and not forced_alpha): - attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) - else: - if rgbFace is None: - attrib['fill'] = 'none' - else: - if tuple(rgbFace[:3]) != (0, 0, 0): - attrib['fill'] = rgb2hex(rgbFace) - if (len(rgbFace) == 4 and rgbFace[3] != 1.0 - and not forced_alpha): - attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) - - if forced_alpha and gc.get_alpha() != 1.0: - attrib['opacity'] = _short_float_fmt(gc.get_alpha()) - - offset, seq = gc.get_dashes() - if seq is not None: - attrib['stroke-dasharray'] = ','.join( - _short_float_fmt(val) for val in seq) - attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) - - linewidth = gc.get_linewidth() - if linewidth: - rgb = gc.get_rgb() - attrib['stroke'] = rgb2hex(rgb) - if not forced_alpha and rgb[3] != 1.0: - attrib['stroke-opacity'] = _short_float_fmt(rgb[3]) - if linewidth != 1.0: - attrib['stroke-width'] = _short_float_fmt(linewidth) - if gc.get_joinstyle() != 'round': - attrib['stroke-linejoin'] = gc.get_joinstyle() - if gc.get_capstyle() != 'butt': - attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()] - - return attrib - - def _get_style(self, gc, rgbFace): - return _generate_css(self._get_style_dict(gc, rgbFace)) - - def _get_clip_attrs(self, gc): - cliprect = gc.get_clip_rectangle() - clippath, clippath_trans = gc.get_clip_path() - if clippath is not None: - clippath_trans = self._make_flip_transform(clippath_trans) - dictkey = (id(clippath), str(clippath_trans)) - elif cliprect is not None: - x, y, w, h = cliprect.bounds - y = self.height-(y+h) - dictkey = (x, y, w, h) - else: - return {} - clip = self._clipd.get(dictkey) - if clip is None: - oid = self._make_id('p', dictkey) - if clippath is not None: - self._clipd[dictkey] = ((clippath, clippath_trans), oid) - else: - self._clipd[dictkey] = (dictkey, oid) - else: - clip, oid = clip - return {'clip-path': f'url(#{oid})'} - - def _write_clips(self): - if not len(self._clipd): - return - writer = self.writer - writer.start('defs') - for clip, oid in self._clipd.values(): - writer.start('clipPath', id=oid) - if len(clip) == 2: - clippath, clippath_trans = clip - path_data = self._convert_path( - clippath, clippath_trans, simplify=False) - writer.element('path', d=path_data) - else: - x, y, w, h = clip - writer.element( - 'rect', - x=_short_float_fmt(x), - y=_short_float_fmt(y), - width=_short_float_fmt(w), - 
height=_short_float_fmt(h)) - writer.end('clipPath') - writer.end('defs') - - def open_group(self, s, gid=None): - # docstring inherited - if gid: - self.writer.start('g', id=gid) - else: - self._groupd[s] = self._groupd.get(s, 0) + 1 - self.writer.start('g', id=f"{s}_{self._groupd[s]:d}") - - def close_group(self, s): - # docstring inherited - self.writer.end('g') - - def option_image_nocomposite(self): - # docstring inherited - return not mpl.rcParams['image.composite_image'] - - def _convert_path(self, path, transform=None, clip=None, simplify=None, - sketch=None): - if clip: - clip = (0.0, 0.0, self.width, self.height) - else: - clip = None - return _path.convert_to_string( - path, transform, clip, simplify, sketch, 6, - [b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii') - - def draw_path(self, gc, path, transform, rgbFace=None): - # docstring inherited - trans_and_flip = self._make_flip_transform(transform) - clip = (rgbFace is None and gc.get_hatch_path() is None) - simplify = path.should_simplify and clip - path_data = self._convert_path( - path, trans_and_flip, clip=clip, simplify=simplify, - sketch=gc.get_sketch_params()) - - if gc.get_url() is not None: - self.writer.start('a', {'xlink:href': gc.get_url()}) - self.writer.element('path', d=path_data, **self._get_clip_attrs(gc), - style=self._get_style(gc, rgbFace)) - if gc.get_url() is not None: - self.writer.end('a') - - def draw_markers( - self, gc, marker_path, marker_trans, path, trans, rgbFace=None): - # docstring inherited - - if not len(path.vertices): - return - - writer = self.writer - path_data = self._convert_path( - marker_path, - marker_trans + Affine2D().scale(1.0, -1.0), - simplify=False) - style = self._get_style_dict(gc, rgbFace) - dictkey = (path_data, _generate_css(style)) - oid = self._markers.get(dictkey) - style = _generate_css({k: v for k, v in style.items() - if k.startswith('stroke')}) - - if oid is None: - oid = self._make_id('m', dictkey) - writer.start('defs') - writer.element('path', id=oid, d=path_data, style=style) - writer.end('defs') - self._markers[dictkey] = oid - - writer.start('g', **self._get_clip_attrs(gc)) - trans_and_flip = self._make_flip_transform(trans) - attrib = {'xlink:href': f'#{oid}'} - clip = (0, 0, self.width*72, self.height*72) - for vertices, code in path.iter_segments( - trans_and_flip, clip=clip, simplify=False): - if len(vertices): - x, y = vertices[-2:] - attrib['x'] = _short_float_fmt(x) - attrib['y'] = _short_float_fmt(y) - attrib['style'] = self._get_style(gc, rgbFace) - writer.element('use', attrib=attrib) - writer.end('g') - - def draw_path_collection(self, gc, master_transform, paths, all_transforms, - offsets, offset_trans, facecolors, edgecolors, - linewidths, linestyles, antialiaseds, urls, - offset_position): - # Is the optimization worth it? 
Rough calculation: - # cost of emitting a path in-line is - # (len_path + 5) * uses_per_path - # cost of definition+use is - # (len_path + 3) + 9 * uses_per_path - len_path = len(paths[0].vertices) if len(paths) > 0 else 0 - uses_per_path = self._iter_collection_uses_per_path( - paths, all_transforms, offsets, facecolors, edgecolors) - should_do_optimization = \ - len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path - if not should_do_optimization: - return super().draw_path_collection( - gc, master_transform, paths, all_transforms, - offsets, offset_trans, facecolors, edgecolors, - linewidths, linestyles, antialiaseds, urls, - offset_position) - - writer = self.writer - path_codes = [] - writer.start('defs') - for i, (path, transform) in enumerate(self._iter_collection_raw_paths( - master_transform, paths, all_transforms)): - transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0) - d = self._convert_path(path, transform, simplify=False) - oid = 'C{:x}_{:x}_{}'.format( - self._path_collection_id, i, self._make_id('', d)) - writer.element('path', id=oid, d=d) - path_codes.append(oid) - writer.end('defs') - - for xo, yo, path_id, gc0, rgbFace in self._iter_collection( - gc, path_codes, offsets, offset_trans, - facecolors, edgecolors, linewidths, linestyles, - antialiaseds, urls, offset_position): - url = gc0.get_url() - if url is not None: - writer.start('a', attrib={'xlink:href': url}) - clip_attrs = self._get_clip_attrs(gc0) - if clip_attrs: - writer.start('g', **clip_attrs) - attrib = { - 'xlink:href': f'#{path_id}', - 'x': _short_float_fmt(xo), - 'y': _short_float_fmt(self.height - yo), - 'style': self._get_style(gc0, rgbFace) - } - writer.element('use', attrib=attrib) - if clip_attrs: - writer.end('g') - if url is not None: - writer.end('a') - - self._path_collection_id += 1 - - def draw_gouraud_triangle(self, gc, points, colors, trans): - # docstring inherited - self._draw_gouraud_triangle(gc, points, colors, trans) - - def _draw_gouraud_triangle(self, gc, points, colors, trans): - # This uses a method described here: - # - # http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html - # - # that uses three overlapping linear gradients to simulate a - # Gouraud triangle. Each gradient goes from fully opaque in - # one corner to fully transparent along the opposite edge. - # The line between the stop points is perpendicular to the - # opposite edge. Underlying these three gradients is a solid - # triangle whose color is the average of all three points. 
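A worked example of the geometry described in the comment above: each of the three gradients runs from one corner (fully opaque) to the foot of the perpendicular dropped from that corner onto the opposite edge (fully transparent). The triangle and helper below are illustrative; the algebra mirrors the xb/yb branch in the code that follows.

```python
def perpendicular_foot(p1, p2, p3):
    """Foot of the perpendicular from p1 onto the line through p2 and p3."""
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    if x2 == x3:                    # opposite edge is vertical
        return x2, y1
    if y2 == y3:                    # opposite edge is horizontal
        return x1, y2
    m1 = (y2 - y3) / (x2 - x3)      # slope of the opposite edge
    b1 = y2 - m1 * x2
    m2 = -1.0 / m1                  # slope of the perpendicular through p1
    b2 = y1 - m2 * x1
    xb = (b2 - b1) / (m1 - m2)
    yb = m2 * xb + b2
    return xb, yb

# Gradient for the corner at (0, 0) of the triangle (0,0), (4,0), (0,3):
print(perpendicular_foot((0, 0), (4, 0), (0, 3)))   # (1.44, 1.92)
```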
- - writer = self.writer - if not self._has_gouraud: - self._has_gouraud = True - writer.start( - 'filter', - id='colorAdd') - writer.element( - 'feComposite', - attrib={'in': 'SourceGraphic'}, - in2='BackgroundImage', - operator='arithmetic', - k2="1", k3="1") - writer.end('filter') - # feColorMatrix filter to correct opacity - writer.start( - 'filter', - id='colorMat') - writer.element( - 'feColorMatrix', - attrib={'type': 'matrix'}, - values='1 0 0 0 0 \n0 1 0 0 0 \n0 0 1 0 0' + - ' \n1 1 1 1 0 \n0 0 0 0 1 ') - writer.end('filter') - - avg_color = np.average(colors, axis=0) - if avg_color[-1] == 0: - # Skip fully-transparent triangles - return - - trans_and_flip = self._make_flip_transform(trans) - tpoints = trans_and_flip.transform(points) - - writer.start('defs') - for i in range(3): - x1, y1 = tpoints[i] - x2, y2 = tpoints[(i + 1) % 3] - x3, y3 = tpoints[(i + 2) % 3] - rgba_color = colors[i] - - if x2 == x3: - xb = x2 - yb = y1 - elif y2 == y3: - xb = x1 - yb = y2 - else: - m1 = (y2 - y3) / (x2 - x3) - b1 = y2 - (m1 * x2) - m2 = -(1.0 / m1) - b2 = y1 - (m2 * x1) - xb = (-b1 + b2) / (m1 - m2) - yb = m2 * xb + b2 - - writer.start( - 'linearGradient', - id=f"GR{self._n_gradients:x}_{i:d}", - gradientUnits="userSpaceOnUse", - x1=_short_float_fmt(x1), y1=_short_float_fmt(y1), - x2=_short_float_fmt(xb), y2=_short_float_fmt(yb)) - writer.element( - 'stop', - offset='1', - style=_generate_css({ - 'stop-color': rgb2hex(avg_color), - 'stop-opacity': _short_float_fmt(rgba_color[-1])})) - writer.element( - 'stop', - offset='0', - style=_generate_css({'stop-color': rgb2hex(rgba_color), - 'stop-opacity': "0"})) - - writer.end('linearGradient') - - writer.end('defs') - - # triangle formation using "path" - dpath = "M " + _short_float_fmt(x1)+',' + _short_float_fmt(y1) - dpath += " L " + _short_float_fmt(x2) + ',' + _short_float_fmt(y2) - dpath += " " + _short_float_fmt(x3) + ',' + _short_float_fmt(y3) + " Z" - - writer.element( - 'path', - attrib={'d': dpath, - 'fill': rgb2hex(avg_color), - 'fill-opacity': '1', - 'shape-rendering': "crispEdges"}) - - writer.start( - 'g', - attrib={'stroke': "none", - 'stroke-width': "0", - 'shape-rendering': "crispEdges", - 'filter': "url(#colorMat)"}) - - writer.element( - 'path', - attrib={'d': dpath, - 'fill': f'url(#GR{self._n_gradients:x}_0)', - 'shape-rendering': "crispEdges"}) - - writer.element( - 'path', - attrib={'d': dpath, - 'fill': f'url(#GR{self._n_gradients:x}_1)', - 'filter': 'url(#colorAdd)', - 'shape-rendering': "crispEdges"}) - - writer.element( - 'path', - attrib={'d': dpath, - 'fill': f'url(#GR{self._n_gradients:x}_2)', - 'filter': 'url(#colorAdd)', - 'shape-rendering': "crispEdges"}) - - writer.end('g') - - self._n_gradients += 1 - - def draw_gouraud_triangles(self, gc, triangles_array, colors_array, - transform): - self.writer.start('g', **self._get_clip_attrs(gc)) - transform = transform.frozen() - for tri, col in zip(triangles_array, colors_array): - self._draw_gouraud_triangle(gc, tri, col, transform) - self.writer.end('g') - - def option_scale_image(self): - # docstring inherited - return True - - def get_image_magnification(self): - return self.image_dpi / 72.0 - - def draw_image(self, gc, x, y, im, transform=None): - # docstring inherited - - h, w = im.shape[:2] - - if w == 0 or h == 0: - return - - clip_attrs = self._get_clip_attrs(gc) - if clip_attrs: - # Can't apply clip-path directly to the image because the image has - # a transformation, which would also be applied to the clip-path. 
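Whether draw_image embeds the bitmap as a base64 data URI or writes a sidecar PNG is controlled by the svg.image_inline rcParam (handled just below); a small sketch:

```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np

# Default (True): the PNG bytes are base64-encoded into an xlink:href data URI.
# False: a sidecar .png is written next to the SVG, so a real filename is
# required (saving to an in-memory buffer raises ValueError in that mode).
mpl.rcParams["svg.image_inline"] = False

fig, ax = plt.subplots()
ax.imshow(np.random.rand(32, 32), interpolation="nearest")
fig.savefig("with_image.svg")   # also writes something like with_image.image0.png
```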
- self.writer.start('g', **clip_attrs) - - url = gc.get_url() - if url is not None: - self.writer.start('a', attrib={'xlink:href': url}) - - attrib = {} - oid = gc.get_gid() - if mpl.rcParams['svg.image_inline']: - buf = BytesIO() - Image.fromarray(im).save(buf, format="png") - oid = oid or self._make_id('image', buf.getvalue()) - attrib['xlink:href'] = ( - "data:image/png;base64,\n" + - base64.b64encode(buf.getvalue()).decode('ascii')) - else: - if self.basename is None: - raise ValueError("Cannot save image data to filesystem when " - "writing SVG to an in-memory buffer") - filename = f'{self.basename}.image{next(self._image_counter)}.png' - _log.info('Writing image file for inclusion: %s', filename) - Image.fromarray(im).save(filename) - oid = oid or 'Im_' + self._make_id('image', filename) - attrib['xlink:href'] = filename - attrib['id'] = oid - - if transform is None: - w = 72.0 * w / self.image_dpi - h = 72.0 * h / self.image_dpi - - self.writer.element( - 'image', - transform=_generate_transform([ - ('scale', (1, -1)), ('translate', (0, -h))]), - x=_short_float_fmt(x), - y=_short_float_fmt(-(self.height - y - h)), - width=_short_float_fmt(w), height=_short_float_fmt(h), - attrib=attrib) - else: - alpha = gc.get_alpha() - if alpha != 1.0: - attrib['opacity'] = _short_float_fmt(alpha) - - flipped = ( - Affine2D().scale(1.0 / w, 1.0 / h) + - transform + - Affine2D() - .translate(x, y) - .scale(1.0, -1.0) - .translate(0.0, self.height)) - - attrib['transform'] = _generate_transform( - [('matrix', flipped.frozen())]) - attrib['style'] = ( - 'image-rendering:crisp-edges;' - 'image-rendering:pixelated') - self.writer.element( - 'image', - width=_short_float_fmt(w), height=_short_float_fmt(h), - attrib=attrib) - - if url is not None: - self.writer.end('a') - if clip_attrs: - self.writer.end('g') - - def _update_glyph_map_defs(self, glyph_map_new): - """ - Emit definitions for not-yet-defined glyphs, and record them as having - been defined. - """ - writer = self.writer - if glyph_map_new: - writer.start('defs') - for char_id, (vertices, codes) in glyph_map_new.items(): - char_id = self._adjust_char_id(char_id) - # x64 to go back to FreeType's internal (integral) units. 
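The glyph-definition machinery above is what svg.fonttype='path' (the default) relies on: every glyph becomes a path in defs that is referenced with use, so the file renders identically everywhere. Setting the rcParam to 'none' takes the _draw_text_as_text route below and emits real text/tspan elements instead; a small sketch:

```python
import matplotlib as mpl
import matplotlib.pyplot as plt

# "path": glyphs rendered as paths -> portable, but text is not editable.
# "none": real SVG text -> editable in Inkscape etc., but depends on the
#         fonts installed wherever the file is viewed.
mpl.rcParams["svg.fonttype"] = "none"

fig, ax = plt.subplots()
ax.set_title("editable title text")
fig.savefig("editable_text.svg")
```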
- path_data = self._convert_path( - Path(vertices * 64, codes), simplify=False) - writer.element( - 'path', id=char_id, d=path_data, - transform=_generate_transform([('scale', (1 / 64,))])) - writer.end('defs') - self._glyph_map.update(glyph_map_new) - - def _adjust_char_id(self, char_id): - return char_id.replace("%20", "_") - - def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None): - # docstring inherited - writer = self.writer - - writer.comment(s) - - glyph_map = self._glyph_map - - text2path = self._text2path - color = rgb2hex(gc.get_rgb()) - fontsize = prop.get_size_in_points() - - style = {} - if color != '#000000': - style['fill'] = color - alpha = gc.get_alpha() if gc.get_forced_alpha() else gc.get_rgb()[3] - if alpha != 1: - style['opacity'] = _short_float_fmt(alpha) - font_scale = fontsize / text2path.FONT_SCALE - attrib = { - 'style': _generate_css(style), - 'transform': _generate_transform([ - ('translate', (x, y)), - ('rotate', (-angle,)), - ('scale', (font_scale, -font_scale))]), - } - writer.start('g', attrib=attrib) - - if not ismath: - font = text2path._get_font(prop) - _glyphs = text2path.get_glyphs_with_font( - font, s, glyph_map=glyph_map, return_new_glyphs_only=True) - glyph_info, glyph_map_new, rects = _glyphs - self._update_glyph_map_defs(glyph_map_new) - - for glyph_id, xposition, yposition, scale in glyph_info: - attrib = {'xlink:href': f'#{glyph_id}'} - if xposition != 0.0: - attrib['x'] = _short_float_fmt(xposition) - if yposition != 0.0: - attrib['y'] = _short_float_fmt(yposition) - writer.element('use', attrib=attrib) - - else: - if ismath == "TeX": - _glyphs = text2path.get_glyphs_tex( - prop, s, glyph_map=glyph_map, return_new_glyphs_only=True) - else: - _glyphs = text2path.get_glyphs_mathtext( - prop, s, glyph_map=glyph_map, return_new_glyphs_only=True) - glyph_info, glyph_map_new, rects = _glyphs - self._update_glyph_map_defs(glyph_map_new) - - for char_id, xposition, yposition, scale in glyph_info: - char_id = self._adjust_char_id(char_id) - writer.element( - 'use', - transform=_generate_transform([ - ('translate', (xposition, yposition)), - ('scale', (scale,)), - ]), - attrib={'xlink:href': f'#{char_id}'}) - - for verts, codes in rects: - path = Path(verts, codes) - path_data = self._convert_path(path, simplify=False) - writer.element('path', d=path_data) - - writer.end('g') - - def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None): - writer = self.writer - - color = rgb2hex(gc.get_rgb()) - style = {} - if color != '#000000': - style['fill'] = color - - alpha = gc.get_alpha() if gc.get_forced_alpha() else gc.get_rgb()[3] - if alpha != 1: - style['opacity'] = _short_float_fmt(alpha) - - if not ismath: - attrib = {} - - font_parts = [] - if prop.get_style() != 'normal': - font_parts.append(prop.get_style()) - if prop.get_variant() != 'normal': - font_parts.append(prop.get_variant()) - weight = fm.weight_dict[prop.get_weight()] - if weight != 400: - font_parts.append(f'{weight}') - - def _normalize_sans(name): - return 'sans-serif' if name in ['sans', 'sans serif'] else name - - def _expand_family_entry(fn): - fn = _normalize_sans(fn) - # prepend generic font families with all configured font names - if fn in fm.font_family_aliases: - # get all of the font names and fix spelling of sans-serif - # (we accept 3 ways CSS only supports 1) - for name in fm.FontManager._expand_aliases(fn): - yield _normalize_sans(name) - # whether a generic name or a family name, it must appear at - # least once - yield fn - - def 
_get_all_quoted_names(prop): - # only quote specific names, not generic names - return [name if name in fm.font_family_aliases else repr(name) - for entry in prop.get_family() - for name in _expand_family_entry(entry)] - - font_parts.extend([ - f'{_short_float_fmt(prop.get_size())}px', - # ensure expansion, quoting, and dedupe of font names - ", ".join(dict.fromkeys(_get_all_quoted_names(prop))) - ]) - style['font'] = ' '.join(font_parts) - if prop.get_stretch() != 'normal': - style['font-stretch'] = prop.get_stretch() - attrib['style'] = _generate_css(style) - - if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"): - # If text anchoring can be supported, get the original - # coordinates and add alignment information. - - # Get anchor coordinates. - transform = mtext.get_transform() - ax, ay = transform.transform(mtext.get_unitless_position()) - ay = self.height - ay - - # Don't do vertical anchor alignment. Most applications do not - # support 'alignment-baseline' yet. Apply the vertical layout - # to the anchor point manually for now. - angle_rad = np.deg2rad(angle) - dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)]) - v_offset = np.dot(dir_vert, [(x - ax), (y - ay)]) - ax = ax + v_offset * dir_vert[0] - ay = ay + v_offset * dir_vert[1] - - ha_mpl_to_svg = {'left': 'start', 'right': 'end', - 'center': 'middle'} - style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()] - - attrib['x'] = _short_float_fmt(ax) - attrib['y'] = _short_float_fmt(ay) - attrib['style'] = _generate_css(style) - attrib['transform'] = _generate_transform([ - ("rotate", (-angle, ax, ay))]) - - else: - attrib['transform'] = _generate_transform([ - ('translate', (x, y)), - ('rotate', (-angle,))]) - - writer.element('text', s, attrib=attrib) - - else: - writer.comment(s) - - width, height, descent, glyphs, rects = \ - self._text2path.mathtext_parser.parse(s, 72, prop) - - # Apply attributes to 'g', not 'text', because we likely have some - # rectangles as well with the same style and transformation. - writer.start('g', - style=_generate_css(style), - transform=_generate_transform([ - ('translate', (x, y)), - ('rotate', (-angle,))]), - ) - - writer.start('text') - - # Sort the characters by font, and output one tspan for each. - spans = {} - for font, fontsize, thetext, new_x, new_y in glyphs: - entry = fm.ttfFontProperty(font) - font_parts = [] - if entry.style != 'normal': - font_parts.append(entry.style) - if entry.variant != 'normal': - font_parts.append(entry.variant) - if entry.weight != 400: - font_parts.append(f'{entry.weight}') - font_parts.extend([ - f'{_short_float_fmt(fontsize)}px', - f'{entry.name!r}', # ensure quoting - ]) - style = {'font': ' '.join(font_parts)} - if entry.stretch != 'normal': - style['font-stretch'] = entry.stretch - style = _generate_css(style) - if thetext == 32: - thetext = 0xa0 # non-breaking space - spans.setdefault(style, []).append((new_x, -new_y, thetext)) - - for style, chars in spans.items(): - chars.sort() - - if len({y for x, y, t in chars}) == 1: # Are all y's the same? 
- ys = str(chars[0][1]) - else: - ys = ' '.join(str(c[1]) for c in chars) - - attrib = { - 'style': style, - 'x': ' '.join(_short_float_fmt(c[0]) for c in chars), - 'y': ys - } - - writer.element( - 'tspan', - ''.join(chr(c[2]) for c in chars), - attrib=attrib) - - writer.end('text') - - for x, y, width, height in rects: - writer.element( - 'rect', - x=_short_float_fmt(x), - y=_short_float_fmt(-y-1), - width=_short_float_fmt(width), - height=_short_float_fmt(height) - ) - - writer.end('g') - - def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None): - # docstring inherited - - clip_attrs = self._get_clip_attrs(gc) - if clip_attrs: - # Cannot apply clip-path directly to the text, because - # it has a transformation - self.writer.start('g', **clip_attrs) - - if gc.get_url() is not None: - self.writer.start('a', {'xlink:href': gc.get_url()}) - - if mpl.rcParams['svg.fonttype'] == 'path': - self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext) - else: - self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext) - - if gc.get_url() is not None: - self.writer.end('a') - - if clip_attrs: - self.writer.end('g') - - def flipy(self): - # docstring inherited - return True - - def get_canvas_width_height(self): - # docstring inherited - return self.width, self.height - - def get_text_width_height_descent(self, s, prop, ismath): - # docstring inherited - return self._text2path.get_text_width_height_descent(s, prop, ismath) - - -class FigureCanvasSVG(FigureCanvasBase): - filetypes = {'svg': 'Scalable Vector Graphics', - 'svgz': 'Scalable Vector Graphics'} - - fixed_dpi = 72 - - def print_svg(self, filename, *, bbox_inches_restore=None, metadata=None): - """ - Parameters - ---------- - filename : str or path-like or file-like - Output target; if a string, a file will be opened for writing. - - metadata : dict[str, Any], optional - Metadata in the SVG file defined as key-value pairs of strings, - datetimes, or lists of strings, e.g., ``{'Creator': 'My software', - 'Contributor': ['Me', 'My Friend'], 'Title': 'Awesome'}``. - - The standard keys and their value types are: - - * *str*: ``'Coverage'``, ``'Description'``, ``'Format'``, - ``'Identifier'``, ``'Language'``, ``'Relation'``, ``'Source'``, - ``'Title'``, and ``'Type'``. - * *str* or *list of str*: ``'Contributor'``, ``'Creator'``, - ``'Keywords'``, ``'Publisher'``, and ``'Rights'``. - * *str*, *date*, *datetime*, or *tuple* of same: ``'Date'``. If a - non-*str*, then it will be formatted as ISO 8601. - - Values have been predefined for ``'Creator'``, ``'Date'``, - ``'Format'``, and ``'Type'``. They can be removed by setting them - to `None`. - - Information is encoded as `Dublin Core Metadata`__. - - .. 
_DC: https://www.dublincore.org/specifications/dublin-core/ - - __ DC_ - """ - with cbook.open_file_cm(filename, "w", encoding="utf-8") as fh: - if not cbook.file_requires_unicode(fh): - fh = codecs.getwriter('utf-8')(fh) - dpi = self.figure.dpi - self.figure.dpi = 72 - width, height = self.figure.get_size_inches() - w, h = width * 72, height * 72 - renderer = MixedModeRenderer( - self.figure, width, height, dpi, - RendererSVG(w, h, fh, image_dpi=dpi, metadata=metadata), - bbox_inches_restore=bbox_inches_restore) - self.figure.draw(renderer) - renderer.finalize() - - def print_svgz(self, filename, **kwargs): - with cbook.open_file_cm(filename, "wb") as fh, \ - gzip.GzipFile(mode='w', fileobj=fh) as gzipwriter: - return self.print_svg(gzipwriter, **kwargs) - - def get_default_filetype(self): - return 'svg' - - def draw(self): - self.figure.draw_without_rendering() - return super().draw() - - -FigureManagerSVG = FigureManagerBase - - -svgProlog = """\ - - -""" - - -@_Backend.export -class _BackendSVG(_Backend): - backend_version = mpl.__version__ - FigureCanvas = FigureCanvasSVG diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py deleted file mode 100644 index 264d564dbda676b52f446c0d25433a15939a78a3..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py +++ /dev/null @@ -1,519 +0,0 @@ -""" -This module uses ctypes to bind a whole bunch of functions and constants from -SecureTransport. The goal here is to provide the low-level API to -SecureTransport. These are essentially the C-level functions and constants, and -they're pretty gross to work with. - -This code is a bastardised version of the code found in Will Bond's oscrypto -library. An enormous debt is owed to him for blazing this trail for us. For -that reason, this code should be considered to be covered both by urllib3's -license and by oscrypto's: - - Copyright (c) 2015-2016 Will Bond - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. 
-""" -from __future__ import absolute_import - -import platform -from ctypes import ( - CDLL, - CFUNCTYPE, - POINTER, - c_bool, - c_byte, - c_char_p, - c_int32, - c_long, - c_size_t, - c_uint32, - c_ulong, - c_void_p, -) -from ctypes.util import find_library - -from ...packages.six import raise_from - -if platform.system() != "Darwin": - raise ImportError("Only macOS is supported") - -version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split("."))) -if version_info < (10, 8): - raise OSError( - "Only OS X 10.8 and newer are supported, not %s.%s" - % (version_info[0], version_info[1]) - ) - - -def load_cdll(name, macos10_16_path): - """Loads a CDLL by name, falling back to known path on 10.16+""" - try: - # Big Sur is technically 11 but we use 10.16 due to the Big Sur - # beta being labeled as 10.16. - if version_info >= (10, 16): - path = macos10_16_path - else: - path = find_library(name) - if not path: - raise OSError # Caught and reraised as 'ImportError' - return CDLL(path, use_errno=True) - except OSError: - raise_from(ImportError("The library %s failed to load" % name), None) - - -Security = load_cdll( - "Security", "/System/Library/Frameworks/Security.framework/Security" -) -CoreFoundation = load_cdll( - "CoreFoundation", - "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", -) - - -Boolean = c_bool -CFIndex = c_long -CFStringEncoding = c_uint32 -CFData = c_void_p -CFString = c_void_p -CFArray = c_void_p -CFMutableArray = c_void_p -CFDictionary = c_void_p -CFError = c_void_p -CFType = c_void_p -CFTypeID = c_ulong - -CFTypeRef = POINTER(CFType) -CFAllocatorRef = c_void_p - -OSStatus = c_int32 - -CFDataRef = POINTER(CFData) -CFStringRef = POINTER(CFString) -CFArrayRef = POINTER(CFArray) -CFMutableArrayRef = POINTER(CFMutableArray) -CFDictionaryRef = POINTER(CFDictionary) -CFArrayCallBacks = c_void_p -CFDictionaryKeyCallBacks = c_void_p -CFDictionaryValueCallBacks = c_void_p - -SecCertificateRef = POINTER(c_void_p) -SecExternalFormat = c_uint32 -SecExternalItemType = c_uint32 -SecIdentityRef = POINTER(c_void_p) -SecItemImportExportFlags = c_uint32 -SecItemImportExportKeyParameters = c_void_p -SecKeychainRef = POINTER(c_void_p) -SSLProtocol = c_uint32 -SSLCipherSuite = c_uint32 -SSLContextRef = POINTER(c_void_p) -SecTrustRef = POINTER(c_void_p) -SSLConnectionRef = c_uint32 -SecTrustResultType = c_uint32 -SecTrustOptionFlags = c_uint32 -SSLProtocolSide = c_uint32 -SSLConnectionType = c_uint32 -SSLSessionOption = c_uint32 - - -try: - Security.SecItemImport.argtypes = [ - CFDataRef, - CFStringRef, - POINTER(SecExternalFormat), - POINTER(SecExternalItemType), - SecItemImportExportFlags, - POINTER(SecItemImportExportKeyParameters), - SecKeychainRef, - POINTER(CFArrayRef), - ] - Security.SecItemImport.restype = OSStatus - - Security.SecCertificateGetTypeID.argtypes = [] - Security.SecCertificateGetTypeID.restype = CFTypeID - - Security.SecIdentityGetTypeID.argtypes = [] - Security.SecIdentityGetTypeID.restype = CFTypeID - - Security.SecKeyGetTypeID.argtypes = [] - Security.SecKeyGetTypeID.restype = CFTypeID - - Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef] - Security.SecCertificateCreateWithData.restype = SecCertificateRef - - Security.SecCertificateCopyData.argtypes = [SecCertificateRef] - Security.SecCertificateCopyData.restype = CFDataRef - - Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SecIdentityCreateWithCertificate.argtypes = 
[ - CFTypeRef, - SecCertificateRef, - POINTER(SecIdentityRef), - ] - Security.SecIdentityCreateWithCertificate.restype = OSStatus - - Security.SecKeychainCreate.argtypes = [ - c_char_p, - c_uint32, - c_void_p, - Boolean, - c_void_p, - POINTER(SecKeychainRef), - ] - Security.SecKeychainCreate.restype = OSStatus - - Security.SecKeychainDelete.argtypes = [SecKeychainRef] - Security.SecKeychainDelete.restype = OSStatus - - Security.SecPKCS12Import.argtypes = [ - CFDataRef, - CFDictionaryRef, - POINTER(CFArrayRef), - ] - Security.SecPKCS12Import.restype = OSStatus - - SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE( - OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t) - ) - - Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc] - Security.SSLSetIOFuncs.restype = OSStatus - - Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t] - Security.SSLSetPeerID.restype = OSStatus - - Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef] - Security.SSLSetCertificate.restype = OSStatus - - Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean] - Security.SSLSetCertificateAuthorities.restype = OSStatus - - Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef] - Security.SSLSetConnection.restype = OSStatus - - Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t] - Security.SSLSetPeerDomainName.restype = OSStatus - - Security.SSLHandshake.argtypes = [SSLContextRef] - Security.SSLHandshake.restype = OSStatus - - Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] - Security.SSLRead.restype = OSStatus - - Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] - Security.SSLWrite.restype = OSStatus - - Security.SSLClose.argtypes = [SSLContextRef] - Security.SSLClose.restype = OSStatus - - Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)] - Security.SSLGetNumberSupportedCiphers.restype = OSStatus - - Security.SSLGetSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t), - ] - Security.SSLGetSupportedCiphers.restype = OSStatus - - Security.SSLSetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - c_size_t, - ] - Security.SSLSetEnabledCiphers.restype = OSStatus - - Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)] - Security.SSLGetNumberEnabledCiphers.restype = OSStatus - - Security.SSLGetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t), - ] - Security.SSLGetEnabledCiphers.restype = OSStatus - - Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)] - Security.SSLGetNegotiatedCipher.restype = OSStatus - - Security.SSLGetNegotiatedProtocolVersion.argtypes = [ - SSLContextRef, - POINTER(SSLProtocol), - ] - Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - - Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)] - Security.SSLCopyPeerTrust.restype = OSStatus - - Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef] - Security.SecTrustSetAnchorCertificates.restype = OSStatus - - Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean] - Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - - Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] - 
Security.SecTrustEvaluate.restype = OSStatus - - Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef] - Security.SecTrustGetCertificateCount.restype = CFIndex - - Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex] - Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef - - Security.SSLCreateContext.argtypes = [ - CFAllocatorRef, - SSLProtocolSide, - SSLConnectionType, - ] - Security.SSLCreateContext.restype = SSLContextRef - - Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean] - Security.SSLSetSessionOption.restype = OSStatus - - Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol] - Security.SSLSetProtocolVersionMin.restype = OSStatus - - Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol] - Security.SSLSetProtocolVersionMax.restype = OSStatus - - try: - Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef] - Security.SSLSetALPNProtocols.restype = OSStatus - except AttributeError: - # Supported only in 10.12+ - pass - - Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SSLReadFunc = SSLReadFunc - Security.SSLWriteFunc = SSLWriteFunc - Security.SSLContextRef = SSLContextRef - Security.SSLProtocol = SSLProtocol - Security.SSLCipherSuite = SSLCipherSuite - Security.SecIdentityRef = SecIdentityRef - Security.SecKeychainRef = SecKeychainRef - Security.SecTrustRef = SecTrustRef - Security.SecTrustResultType = SecTrustResultType - Security.SecExternalFormat = SecExternalFormat - Security.OSStatus = OSStatus - - Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, "kSecImportExportPassphrase" - ) - Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, "kSecImportItemIdentity" - ) - - # CoreFoundation time! 
- CoreFoundation.CFRetain.argtypes = [CFTypeRef] - CoreFoundation.CFRetain.restype = CFTypeRef - - CoreFoundation.CFRelease.argtypes = [CFTypeRef] - CoreFoundation.CFRelease.restype = None - - CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef] - CoreFoundation.CFGetTypeID.restype = CFTypeID - - CoreFoundation.CFStringCreateWithCString.argtypes = [ - CFAllocatorRef, - c_char_p, - CFStringEncoding, - ] - CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - - CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding] - CoreFoundation.CFStringGetCStringPtr.restype = c_char_p - - CoreFoundation.CFStringGetCString.argtypes = [ - CFStringRef, - c_char_p, - CFIndex, - CFStringEncoding, - ] - CoreFoundation.CFStringGetCString.restype = c_bool - - CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex] - CoreFoundation.CFDataCreate.restype = CFDataRef - - CoreFoundation.CFDataGetLength.argtypes = [CFDataRef] - CoreFoundation.CFDataGetLength.restype = CFIndex - - CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef] - CoreFoundation.CFDataGetBytePtr.restype = c_void_p - - CoreFoundation.CFDictionaryCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - POINTER(CFTypeRef), - CFIndex, - CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks, - ] - CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - - CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef] - CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef - - CoreFoundation.CFArrayCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreate.restype = CFArrayRef - - CoreFoundation.CFArrayCreateMutable.argtypes = [ - CFAllocatorRef, - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - - CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p] - CoreFoundation.CFArrayAppendValue.restype = None - - CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef] - CoreFoundation.CFArrayGetCount.restype = CFIndex - - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex] - CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p - - CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, "kCFAllocatorDefault" - ) - CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll( - CoreFoundation, "kCFTypeArrayCallBacks" - ) - CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( - CoreFoundation, "kCFTypeDictionaryKeyCallBacks" - ) - CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, "kCFTypeDictionaryValueCallBacks" - ) - - CoreFoundation.CFTypeRef = CFTypeRef - CoreFoundation.CFArrayRef = CFArrayRef - CoreFoundation.CFStringRef = CFStringRef - CoreFoundation.CFDictionaryRef = CFDictionaryRef - -except (AttributeError): - raise ImportError("Error initializing ctypes") - - -class CFConst(object): - """ - A class object that acts as essentially a namespace for CoreFoundation - constants. - """ - - kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) - - -class SecurityConst(object): - """ - A class object that acts as essentially a namespace for Security constants. 
- """ - - kSSLSessionOptionBreakOnServerAuth = 0 - - kSSLProtocol2 = 1 - kSSLProtocol3 = 2 - kTLSProtocol1 = 4 - kTLSProtocol11 = 7 - kTLSProtocol12 = 8 - # SecureTransport does not support TLS 1.3 even if there's a constant for it - kTLSProtocol13 = 10 - kTLSProtocolMaxSupported = 999 - - kSSLClientSide = 1 - kSSLStreamType = 0 - - kSecFormatPEMSequence = 10 - - kSecTrustResultInvalid = 0 - kSecTrustResultProceed = 1 - # This gap is present on purpose: this was kSecTrustResultConfirm, which - # is deprecated. - kSecTrustResultDeny = 3 - kSecTrustResultUnspecified = 4 - kSecTrustResultRecoverableTrustFailure = 5 - kSecTrustResultFatalTrustFailure = 6 - kSecTrustResultOtherError = 7 - - errSSLProtocol = -9800 - errSSLWouldBlock = -9803 - errSSLClosedGraceful = -9805 - errSSLClosedNoNotify = -9816 - errSSLClosedAbort = -9806 - - errSSLXCertChainInvalid = -9807 - errSSLCrypto = -9809 - errSSLInternal = -9810 - errSSLCertExpired = -9814 - errSSLCertNotYetValid = -9815 - errSSLUnknownRootCert = -9812 - errSSLNoRootCert = -9813 - errSSLHostNameMismatch = -9843 - errSSLPeerHandshakeFail = -9824 - errSSLPeerUserCancelled = -9839 - errSSLWeakPeerEphemeralDHKey = -9850 - errSSLServerAuthCompleted = -9841 - errSSLRecordOverflow = -9847 - - errSecVerifyFailed = -67808 - errSecNoTrustSettings = -25263 - errSecItemNotFound = -25300 - errSecInvalidTrustSettings = -25262 - - # Cipher suites. We only pick the ones our default cipher string allows. - # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9 - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8 - TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D - TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C - TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D - TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C - TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 - TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F - TLS_AES_128_GCM_SHA256 = 0x1301 - TLS_AES_256_GCM_SHA384 = 0x1302 - TLS_AES_128_CCM_8_SHA256 = 0x1305 - TLS_AES_128_CCM_SHA256 = 0x1304 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/arduino.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/arduino.py deleted file mode 100644 index 9c58f25c066faab5b7698d71f4eb0387038c320a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/arduino.py +++ /dev/null @@ -1,96 +0,0 @@ -""" - pygments.styles.arduino - ~~~~~~~~~~~~~~~~~~~~~~~ - - Arduino® Syntax highlighting style. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. 
- :license: BSD, see LICENSE for details. -""" - -from pygments.style import Style -from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic, Whitespace - - -class ArduinoStyle(Style): - """ - The Arduino® language style. This style is designed to highlight the - Arduino source code, so expect the best results with it. - """ - - background_color = "#ffffff" - - styles = { - Whitespace: "", # class: 'w' - Error: "#a61717", # class: 'err' - - Comment: "#95a5a6", # class: 'c' - Comment.Multiline: "", # class: 'cm' - Comment.Preproc: "#728E00", # class: 'cp' - Comment.Single: "", # class: 'c1' - Comment.Special: "", # class: 'cs' - - Keyword: "#728E00", # class: 'k' - Keyword.Constant: "#00979D", # class: 'kc' - Keyword.Declaration: "", # class: 'kd' - Keyword.Namespace: "", # class: 'kn' - Keyword.Pseudo: "#00979D", # class: 'kp' - Keyword.Reserved: "#00979D", # class: 'kr' - Keyword.Type: "#00979D", # class: 'kt' - - Operator: "#728E00", # class: 'o' - Operator.Word: "", # class: 'ow' - - Name: "#434f54", # class: 'n' - Name.Attribute: "", # class: 'na' - Name.Builtin: "#728E00", # class: 'nb' - Name.Builtin.Pseudo: "", # class: 'bp' - Name.Class: "", # class: 'nc' - Name.Constant: "", # class: 'no' - Name.Decorator: "", # class: 'nd' - Name.Entity: "", # class: 'ni' - Name.Exception: "", # class: 'ne' - Name.Function: "#D35400", # class: 'nf' - Name.Property: "", # class: 'py' - Name.Label: "", # class: 'nl' - Name.Namespace: "", # class: 'nn' - Name.Other: "#728E00", # class: 'nx' - Name.Tag: "", # class: 'nt' - Name.Variable: "", # class: 'nv' - Name.Variable.Class: "", # class: 'vc' - Name.Variable.Global: "", # class: 'vg' - Name.Variable.Instance: "", # class: 'vi' - - Number: "#8A7B52", # class: 'm' - Number.Float: "", # class: 'mf' - Number.Hex: "", # class: 'mh' - Number.Integer: "", # class: 'mi' - Number.Integer.Long: "", # class: 'il' - Number.Oct: "", # class: 'mo' - - String: "#7F8C8D", # class: 's' - String.Backtick: "", # class: 'sb' - String.Char: "", # class: 'sc' - String.Doc: "", # class: 'sd' - String.Double: "", # class: 's2' - String.Escape: "", # class: 'se' - String.Heredoc: "", # class: 'sh' - String.Interpol: "", # class: 'si' - String.Other: "", # class: 'sx' - String.Regex: "", # class: 'sr' - String.Single: "", # class: 's1' - String.Symbol: "", # class: 'ss' - - Generic: "", # class: 'g' - Generic.Deleted: "", # class: 'gd', - Generic.Emph: "", # class: 'ge' - Generic.Error: "", # class: 'gr' - Generic.Heading: "", # class: 'gh' - Generic.Inserted: "", # class: 'gi' - Generic.Output: "", # class: 'go' - Generic.Prompt: "", # class: 'gp' - Generic.Strong: "", # class: 'gs' - Generic.Subheading: "", # class: 'gu' - Generic.Traceback: "", # class: 'gt' - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pytz/lazy.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pytz/lazy.py deleted file mode 100644 index 39344fc1f8c77d5ec43539d0c8e655f4b5d7d6f6..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pytz/lazy.py +++ /dev/null @@ -1,172 +0,0 @@ -from threading import RLock -try: - from collections.abc import Mapping as DictMixin -except ImportError: # Python < 3.3 - try: - from UserDict import DictMixin # Python 2 - except ImportError: # Python 3.0-3.3 - from collections import Mapping as DictMixin - - -# With lazy loading, we might end up with multiple threads triggering -# it at the same time. We need a lock. 
-_fill_lock = RLock() - - -class LazyDict(DictMixin): - """Dictionary populated on first use.""" - data = None - - def __getitem__(self, key): - if self.data is None: - _fill_lock.acquire() - try: - if self.data is None: - self._fill() - finally: - _fill_lock.release() - return self.data[key.upper()] - - def __contains__(self, key): - if self.data is None: - _fill_lock.acquire() - try: - if self.data is None: - self._fill() - finally: - _fill_lock.release() - return key in self.data - - def __iter__(self): - if self.data is None: - _fill_lock.acquire() - try: - if self.data is None: - self._fill() - finally: - _fill_lock.release() - return iter(self.data) - - def __len__(self): - if self.data is None: - _fill_lock.acquire() - try: - if self.data is None: - self._fill() - finally: - _fill_lock.release() - return len(self.data) - - def keys(self): - if self.data is None: - _fill_lock.acquire() - try: - if self.data is None: - self._fill() - finally: - _fill_lock.release() - return self.data.keys() - - -class LazyList(list): - """List populated on first use.""" - - _props = [ - '__str__', '__repr__', '__unicode__', - '__hash__', '__sizeof__', '__cmp__', - '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', - 'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove', - 'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__', - '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__', - '__getitem__', '__setitem__', '__delitem__', '__iter__', - '__reversed__', '__getslice__', '__setslice__', '__delslice__'] - - def __new__(cls, fill_iter=None): - - if fill_iter is None: - return list() - - # We need a new class as we will be dynamically messing with its - # methods. - class LazyList(list): - pass - - fill_iter = [fill_iter] - - def lazy(name): - def _lazy(self, *args, **kw): - _fill_lock.acquire() - try: - if len(fill_iter) > 0: - list.extend(self, fill_iter.pop()) - for method_name in cls._props: - delattr(LazyList, method_name) - finally: - _fill_lock.release() - return getattr(list, name)(self, *args, **kw) - return _lazy - - for name in cls._props: - setattr(LazyList, name, lazy(name)) - - new_list = LazyList() - return new_list - -# Not all versions of Python declare the same magic methods. -# Filter out properties that don't exist in this version of Python -# from the list. 
-LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)] - - -class LazySet(set): - """Set populated on first use.""" - - _props = ( - '__str__', '__repr__', '__unicode__', - '__hash__', '__sizeof__', '__cmp__', - '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', - '__contains__', '__len__', '__nonzero__', - '__getitem__', '__setitem__', '__delitem__', '__iter__', - '__sub__', '__and__', '__xor__', '__or__', - '__rsub__', '__rand__', '__rxor__', '__ror__', - '__isub__', '__iand__', '__ixor__', '__ior__', - 'add', 'clear', 'copy', 'difference', 'difference_update', - 'discard', 'intersection', 'intersection_update', 'isdisjoint', - 'issubset', 'issuperset', 'pop', 'remove', - 'symmetric_difference', 'symmetric_difference_update', - 'union', 'update') - - def __new__(cls, fill_iter=None): - - if fill_iter is None: - return set() - - class LazySet(set): - pass - - fill_iter = [fill_iter] - - def lazy(name): - def _lazy(self, *args, **kw): - _fill_lock.acquire() - try: - if len(fill_iter) > 0: - for i in fill_iter.pop(): - set.add(self, i) - for method_name in cls._props: - delattr(LazySet, method_name) - finally: - _fill_lock.release() - return getattr(set, name)(self, *args, **kw) - return _lazy - - for name in cls._props: - setattr(LazySet, name, lazy(name)) - - new_set = LazySet() - return new_set - -# Not all versions of Python declare the same magic methods. -# Filter out properties that don't exist in this version of Python -# from the list. -LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)] diff --git a/spaces/prognosis/inference-bloom-doc-qa/README.md b/spaces/prognosis/inference-bloom-doc-qa/README.md deleted file mode 100644 index e1d50d8a2b222ce620597b1b579e10657cae7d14..0000000000000000000000000000000000000000 --- a/spaces/prognosis/inference-bloom-doc-qa/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Inference Bloom Doc Qa -emoji: 🐢 -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/test_project/python/dqn/policies.py b/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/test_project/python/dqn/policies.py deleted file mode 100644 index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/test_project/python/dqn/policies.py +++ /dev/null @@ -1,237 +0,0 @@ -from typing import Any, Dict, List, Optional, Type - -import gym -import torch as th -from torch import nn - -from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp -from stable_baselines3.common.type_aliases import Schedule - - -class QNetwork(BasePolicy): - """ - Action-Value (Q-Value) network for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param net_arch: The specification of the policy and value networks. 
- :param activation_fn: Activation function - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True, - ): - super(QNetwork, self).__init__( - observation_space, - action_space, - features_extractor=features_extractor, - normalize_images=normalize_images, - ) - - if net_arch is None: - net_arch = [64, 64] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.features_extractor = features_extractor - self.features_dim = features_dim - self.normalize_images = normalize_images - action_dim = self.action_space.n # number of actions - q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net) - - def forward(self, obs: th.Tensor) -> th.Tensor: - """ - Predict the q-values. - - :param obs: Observation - :return: The estimated Q-Value for each action. - """ - return self.q_net(self.extract_features(obs)) - - def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor: - q_values = self.forward(observation) - # Greedy action - action = q_values.argmax(dim=1).reshape(-1) - return action - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_arch, - features_dim=self.features_dim, - activation_fn=self.activation_fn, - features_extractor=self.features_extractor, - ) - ) - return data - - -class DQNPolicy(BasePolicy): - """ - Policy class with Q-Value Net and target net for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param features_extractor_kwargs: Keyword arguments - to pass to the features extractor. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(DQNPolicy, self).__init__( - observation_space, - action_space, - features_extractor_class, - features_extractor_kwargs, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs, - ) - - if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: - net_arch = [] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.normalize_images = normalize_images - - self.net_args = { - "observation_space": self.observation_space, - "action_space": self.action_space, - "net_arch": self.net_arch, - "activation_fn": self.activation_fn, - "normalize_images": normalize_images, - } - - self.q_net, self.q_net_target = None, None - self._build(lr_schedule) - - def _build(self, lr_schedule: Schedule) -> None: - """ - Create the network and the optimizer. - - :param lr_schedule: Learning rate schedule - lr_schedule(1) is the initial learning rate - """ - - self.q_net = self.make_q_net() - self.q_net_target = self.make_q_net() - self.q_net_target.load_state_dict(self.q_net.state_dict()) - - # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - - def make_q_net(self) -> QNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return QNetwork(**net_args).to(self.device) - - def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self._predict(obs, deterministic=deterministic) - - def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self.q_net._predict(obs, deterministic=deterministic) - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_args["net_arch"], - activation_fn=self.net_args["activation_fn"], - lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone - optimizer_class=self.optimizer_class, - optimizer_kwargs=self.optimizer_kwargs, - features_extractor_class=self.features_extractor_class, - features_extractor_kwargs=self.features_extractor_kwargs, - ) - ) - return data - - -MlpPolicy = DQNPolicy - - -class CnnPolicy(DQNPolicy): - """ - Policy class for DQN when using images as input. - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(CnnPolicy, self).__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) - - -register_policy("MlpPolicy", MlpPolicy) -register_policy("CnnPolicy", CnnPolicy) diff --git a/spaces/qinzhu/diy-girlfriend/text/cantonese.py b/spaces/qinzhu/diy-girlfriend/text/cantonese.py deleted file mode 100644 index 32eae72ef7eb43d493da6d6f75dd46176d0e8808..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('chinese_dialect_lexicons/jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Adobe Photoshop CS3 Extended Crack LINK .rar.md b/spaces/quidiaMuxgu/Expedit-SAM/Adobe Photoshop CS3 Extended Crack LINK .rar.md deleted file mode 100644 index 75188b08f4f6fcb5385415e2116edacef7689394..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Adobe Photoshop CS3 Extended Crack LINK .rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Adobe Photoshop CS3 Extended Crack .rar


    Download >>> https://geags.com/2uCrZS



    - - 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Adobeexportpdfcrackdownload.md b/spaces/quidiaMuxgu/Expedit-SAM/Adobeexportpdfcrackdownload.md deleted file mode 100644 index ed085b325c5632c787b3b69ec23868056d44676e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Adobeexportpdfcrackdownload.md +++ /dev/null @@ -1,7 +0,0 @@ -
    -

    Pavelaul 7b17bfd26b https://coub.com/stories/3438232-adobeexportpdfcrackdownload-chrivol. Reply. avatar cara menanam says:. almycirc 353a2c1c90 https://www.youtube.com/watch?v=ejySJWlCZMA https://forum.adobetoolkit.com/threads/adobexporterpdf-crackfile-b12c1089-kcaa-a1e85edf5-download-file-adobe-exporterpdf-crackfile-image-download-76630-adobeexportpdfcrackdownload-722. Reply albecan.

    -

    adobeexportpdfcrackdownload


    Download Zip --->>> https://geags.com/2uCs7v



    -

    4c7459543a https://tinyurl.com/y7f3dx66 >https://tinyurl.com/yaatgttd https://tinyurl.com/y8f95kne http://egr-abbr.com/adobeexportpdfcrackdownload.html. https://vm1778.xpetite.net/blog/adobe-export-pdf-crack-download-new-ideas-133203.html https://nv2w7i.000space.com/adobe-export-pdf-crack-download-latest-ideas-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020. https://sebass99.vzaar.com/adobe-export-pdf-crack-download-3-latest-ideas-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020-2020.

    -

    https://cdn.thingiverse.com/assets/f5/96/f0/37/a9/immmyc854.html https://lipciriners.weebly.com/adobeexportpdfcrackdownload.html https://trello.com/c/ffTuSwDT/64-adobeexportpdfcrackdownload-i. And I am extracting this pdf into a folder called export. And after that it has some folders in it as well. I run the same command again to export the extracted folder and I get an error like this $ wget -O export.pdf https://lipciriners.weebly.com/export.html --2018-04-25 20:32:57-- https://lipciriners.weebly.com/export.html Resolving lipciriners.weebly.com (lipciriners.weebly.com)... 209.3.159.1 Connecting to lipciriners.weebly.com (lipciriners.weebly.com)|209.3.159.1|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 4319 (4.3K) [application/octet-stream] Saving to: ‘export.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Gamehouse 150 Games __HOT__ Crack Org.md b/spaces/quidiaMuxgu/Expedit-SAM/Gamehouse 150 Games __HOT__ Crack Org.md deleted file mode 100644 index 9a9af66626e9c31cb93d8e5f8c8dc83a6202a1c2..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Gamehouse 150 Games __HOT__ Crack Org.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Gamehouse 150 Games Crack Org


    Downloadhttps://geags.com/2uCsDS



    -
    -January 7, 2020 - 150 Gamehouse Pack games. Topics: Academy of Magic, Adventure Inlay, Adventure Inlay: Safari Edition. Starting January 7, 2020, you can purchase packs of 150 games to choose from on January 7, 2020 in the Gamehouse store, including games in one "game room". Topics: Academy of Magic, Adventure Inlay, Adventure Inlay: Safari Edition. The package includes one game and additional materials for the game (tips, achievements, score, real-time score, tips, bonus cards, etc.). The cost of the game is € 29.99. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Gatecycle 6.0 Ge.35.md b/spaces/quidiaMuxgu/Expedit-SAM/Gatecycle 6.0 Ge.35.md deleted file mode 100644 index 8e4634eb477ca60b49b24aa5069822625699df02..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Gatecycle 6.0 Ge.35.md +++ /dev/null @@ -1,6 +0,0 @@ -

    gatecycle 6.0 ge.35


    DOWNLOADhttps://geags.com/2uCrII



    - -Ayman M. Shokry | Greater Atlanta Area | Senior Service Director at GE Power ... Modeling skills using GE's GateCycle & GP's VP ♢ ASME PTC Performance ... Oct 2004 - Sep 2010 6 years ... ~35 contractual tests on CCGT power plants ranging from 80MW to 1200MW, including peakers and Aeroderivative Gas Turbines. 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Movavi Video Suite 20 Crack ((FULL)) Plus Keygen 2020 [Latest].md b/spaces/quidiaMuxgu/Expedit-SAM/Movavi Video Suite 20 Crack ((FULL)) Plus Keygen 2020 [Latest].md deleted file mode 100644 index 07ee7617ed44916a648b1664f3baa36f48df2ac5..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Movavi Video Suite 20 Crack ((FULL)) Plus Keygen 2020 [Latest].md +++ /dev/null @@ -1,115 +0,0 @@ - -

    Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest]

    - -

    If you are looking for a powerful and easy-to-use video editing software, you might want to check out Movavi Video Suite 20. This is a comprehensive video making program that allows you to create professional-looking movies and slideshows on your home computer, even if you have no experience. You can also use Movavi Video Suite 20 to convert videos to different formats, burn DVDs, digitize analog videos, and more.

    - -

    What can you do with Movavi Video Suite 20?

    - -

    Movavi Video Suite 20 offers a wide range of features and tools to help you unleash your creativity and make amazing videos. Here are some of the things you can do with this software:

    -

    Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest]


    Download Zip > https://geags.com/2uCqdV



    - -
      -
    • Make a movie from video or audio clips already stored on your computer, mobile device, or camera.
    • -
    • Create a slideshow from photos or pictures.
    • -
    • Capture video from a screen and make a video tutorial.
    • -
    • Record yourself on a webcam and create a unique entry for your blog.
    • -
    • Enhance video quality. Stabilize shaky video, tweak color settings, sharpen blurry video, and much more.
    • -
    • Cut and join. Cut out unnecessary fragments or join individual clips to make one longer movie.
    • -
    • Let your story flow smoothly. Link the different segments of your video with stylish transitions.
    • -
    • Add more than 80 special visual effects, such as Retro Movie, Slow Motion, Flying Objects, etc.
    • -
    • Work with audio. Add background music and sound effects in almost any format. Take advantage of the built-in ready-made audio tracks and samples. Record your own voice-over using your microphone. Vary audio volume and playback speed, equalize sound, apply fade-in/out effects. Overlay creative filters like Robot, Echo, Radio, and more.
    • -
    • Add captions and labels. Choose from over 100 fonts. Add atmosphere with animated text. Create colorful text backgrounds. Liven up video tutorials with ready-made callouts.
    • -
    • Upload your movie to YouTube or Facebook directly from the program.
    • -
    • Save it in any popular video or audio format and watch it on your computer.
    • -
    • Use our handy mobile presets to watch your movie on your phone, tablet, or other mobile device.
    • -
    • Burn video on CD, DVD, or Blu-ray.
    • -
    - -

    How to crack and activate Movavi Video Suite 20?

    - -

    If you want to enjoy all the features of Movavi Video Suite 20 without paying for it, you can use a crack and keygen to activate it for free. Here are the steps you need to follow:

    - -
      -
    1. Download Movavi Video Suite 20 trial setup.exe from the official website.
    2. -
    3. Install the program on your computer.
    4. -
    5. Download Movavi Video Suite 20 crack and keygen from a reliable source.
    6. -
    7. Copy all the files from the crack folder to the installation directory.
    8. -
    9. Run the keygen and generate a serial key.
    10. -
    11. Launch the program and enter the serial key when prompted.
    12. -
    13. Enjoy Movavi Video Suite 20 full version for free.
    14. -
    - -

    Why choose Movavi Video Suite 20?

    - -

    Movavi Video Suite 20 is one of the best video editing software available in the market today. It has many advantages over other similar programs, such as:

    - -
      -
    • It has a user-friendly interface that is easy to navigate and understand.
    • -
    • It has a high-performance engine that ensures fast and smooth video processing.
    • -
    • It has a large collection of effects and transitions that can make your videos more attractive and dynamic.
    • -
    • It has a flexible output options that allow you to save your videos in any format and device you want.
    • -
    • It has a low system requirements that make it compatible with most computers and laptops.
    • -
    - -

    Movavi Video Suite 20 is a great choice for anyone who wants to create stunning videos without spending too much time and money. Whether you are a beginner or a professional, you can find everything you need in this software to make your videos stand out from the crowd. Download Movavi Video Suite 20 crack plus keygen 2020 [latest] today and start making amazing videos!

    -

    What are the benefits of Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest]?

    - -

    By using Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest], you can enjoy all the features and benefits of Movavi Video Suite 20 without paying a single penny. This means that you can:

    - -
      -
    • Save money and time. You don't have to buy a license or subscription to use Movavi Video Suite 20. You can also avoid annoying ads and pop-ups that might interrupt your work.
    • -
    • Access unlimited updates and support. You can always download the latest version of Movavi Video Suite 20 with the crack and keygen. You can also get help from the online community and the official website if you encounter any problems or have any questions.
    • -
    • Unlock all the tools and features. You can use all the functions and options of Movavi Video Suite 20 without any limitations or restrictions. You can edit, convert, burn, capture, and share your videos as much as you want.
    • -
    • Enhance your creativity and skills. You can experiment with different effects, transitions, filters, audio tracks, captions, and more. You can also learn from the tutorials and guides that are available on the program and online.
    • -
    • Create stunning videos for any purpose and audience. You can make videos for personal or professional use, such as family memories, travel vlogs, educational courses, business presentations, marketing campaigns, etc. You can also customize your videos according to your preferences and needs.
    • -
    - -

    How to use Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest]?

    - -

    Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] is very easy to use and understand. You don't need any special skills or knowledge to use this software. Here are some simple steps you can follow to use Movavi Video Suite 20:

    - -
      -
    1. Launch Movavi Video Suite 20 on your computer.
    2. -
    3. Select the tool or feature you want to use from the main menu.
    4. -
    5. Add your video or audio files to the program by clicking on the Add Media Files button or dragging and dropping them to the working area.
    6. -
    7. Edit your files as you wish by using the editing tools on the toolbar and the timeline.
    8. -
    9. Preview your video by clicking on the Play button or pressing the spacebar.
    10. -
    11. Save your video by clicking on the Export button or pressing Ctrl+E.
    12. -
    13. Choose the format, device, or platform you want to save your video for from the tabs on the left side of the export window.
    14. -
    15. Adjust the settings and parameters of your video if needed by clicking on the cogwheel icon next to the format name.
    16. -
    17. Click on Start to begin saving your video.
    18. -
    - -

    Conclusion

    - -

    Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] is a great solution for anyone who wants to create amazing videos without spending a fortune. It is a comprehensive video making program that offers a wide range of features and tools to help you make professional-looking movies and slideshows on your home computer. You can also use it to convert videos to different formats, burn DVDs, digitize analog videos, and more. By using Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest], you can enjoy all these benefits for free. You can download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] from a reliable source and activate it with a few simple steps. You can then start making stunning videos for any purpose and audience. Download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] today and unleash your creativity!

    -

    How to download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest]?

    - -

    If you want to download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest], you need to find a reliable source that offers the crack and keygen files for free. There are many websites that claim to provide Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest], but not all of them are trustworthy and safe. Some of them may contain viruses, malware, spyware, or other harmful programs that can damage your computer or steal your personal information. Therefore, you need to be careful and cautious when choosing a website to download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest]. Here are some tips to help you find a reliable source:

    - -
      -
    • Check the reputation and reviews of the website. You can use online tools like Trustpilot, Sitejabber, or Scamadviser to check the ratings and feedback of other users who have used the website before. You can also look for comments and testimonials on social media platforms like Facebook, Twitter, or Reddit.
    • -
    • Check the security and privacy of the website. You can use online tools like SSL Checker, VirusTotal, or URLVoid to check if the website has a valid SSL certificate, if it is free from viruses and malware, and if it has a good reputation among other websites.
    • -
    • Check the download speed and quality of the website. You can use online tools like Fast.com, Speedtest.net, or Pingdom to check how fast the website loads and how fast it can download files. You can also check if the website offers high-quality and original files that match the description and screenshots.
    • -
    • Check the customer support and service of the website. You can use online tools like LiveChat, Zendesk, or Freshdesk to check if the website has a live chat option, a ticket system, an email address, or a phone number that you can contact in case you have any questions or issues with the download.
    • -
    - -

    One of the websites that meets all these criteria and offers Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] for free is [https://tinurll.com/2svRe8](https://tinurll.com/2svRe8). This website has a good reputation and reviews among users who have downloaded Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] from it. It also has a secure and private connection, a fast and high-quality download speed, and a friendly and helpful customer support team. You can download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] from this website by following these simple steps:

    -

    - -
      -
    1. Go to [https://tinurll.com/2svRe8](https://tinurll.com/2svRe8) on your browser.
    2. -
    3. Click on the Download button on the homepage.
    4. -
    5. Wait for a few seconds until the download link is generated.
    6. -
    7. Click on the Download Now button on the new page.
    8. -
    9. Wait for a few minutes until the download is completed.
    10. -
    11. Extract the zip file using WinRAR or WinZip.
    12. -
    13. Follow the instructions in the Readme.txt file to install and activate Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest].
    14. -
    - -

    Conclusion

    - -

    Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] is a great solution for anyone who wants to create amazing videos without spending a fortune. It is a comprehensive video making program that offers a wide range of features and tools to help you make professional-looking movies and slideshows on your home computer. You can also use it to convert videos to different formats, burn DVDs, digitize analog videos, and more. By using Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest], you can enjoy all these benefits for free. You can download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] from a reliable source like [https://tinurll.com/2svRe8](https://tinurll.com/2svRe8) and activate it with a few simple steps. You can then start making stunning videos for any purpose and audience. Download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] today and unleash your creativity!

    -

    In conclusion, Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] is a perfect choice for anyone who wants to make amazing videos without breaking the bank. It is a complete video making software that allows you to create professional-looking movies and slideshows on your home computer, even if you have no experience. You can also use it to convert videos to different formats, burn DVDs, digitize analog videos, and more. By using Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest], you can access all the features and benefits of Movavi Video Suite 20 for free. You can download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] from a reliable source like [https://tinurll.com/2svRe8](https://tinurll.com/2svRe8) and activate it with a few simple steps. You can then start making stunning videos for any purpose and audience. Download Movavi Video Suite 20 Crack Plus Keygen 2020 [Latest] today and unleash your creativity!

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/r3gm/Advanced-RVC-Inference/vc_infer_pipeline.py b/spaces/r3gm/Advanced-RVC-Inference/vc_infer_pipeline.py deleted file mode 100644 index 82c15f59a8072e1b317fa1d750ccc1b814a6989d..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Advanced-RVC-Inference/vc_infer_pipeline.py +++ /dev/null @@ -1,443 +0,0 @@ -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -now_dir = os.getcwd() -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - model = "full" - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd 
= torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - 
feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // 
self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/LazyImport.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/LazyImport.py deleted file mode 100644 index 5bdb05ddd5a546a43adba7274b4c3465bb77f2f5..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/LazyImport.py +++ /dev/null @@ -1,13 +0,0 @@ -from importlib.util import find_spec, LazyLoader, module_from_spec -from sys import modules - -def lazyload(name): - if name in modules: - return modules[name] - else: - spec = find_spec(name) - loader = LazyLoader(spec.loader) - module = module_from_spec(spec) - modules[name] = module - loader.exec_module(module) - return module \ No newline at end of file diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/calc_rvc_model_similarity.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/calc_rvc_model_similarity.py deleted file mode 100644 index 42496e088e51dc5162d0714470c2226f696e260c..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/calc_rvc_model_similarity.py +++ /dev/null @@ -1,96 +0,0 @@ -# This code references https://huggingface.co/JosephusCheung/ASimilarityCalculatior/blob/main/qwerty.py -# Fill in the path of the model to be queried and the root directory of the reference models, and this script will return the similarity between the model to be queried and all reference models. 
-import os -import logging - -logger = logging.getLogger(__name__) - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def cal_cross_attn(to_q, to_k, to_v, rand_input): - hidden_dim, embed_dim = to_q.shape - attn_to_q = nn.Linear(hidden_dim, embed_dim, bias=False) - attn_to_k = nn.Linear(hidden_dim, embed_dim, bias=False) - attn_to_v = nn.Linear(hidden_dim, embed_dim, bias=False) - attn_to_q.load_state_dict({"weight": to_q}) - attn_to_k.load_state_dict({"weight": to_k}) - attn_to_v.load_state_dict({"weight": to_v}) - - return torch.einsum( - "ik, jk -> ik", - F.softmax( - torch.einsum("ij, kj -> ik", attn_to_q(rand_input), attn_to_k(rand_input)), - dim=-1, - ), - attn_to_v(rand_input), - ) - - -def model_hash(filename): - try: - with open(filename, "rb") as file: - import hashlib - - m = hashlib.sha256() - - file.seek(0x100000) - m.update(file.read(0x10000)) - return m.hexdigest()[0:8] - except FileNotFoundError: - return "NOFILE" - - -def eval(model, n, input): - qk = f"enc_p.encoder.attn_layers.{n}.conv_q.weight" - uk = f"enc_p.encoder.attn_layers.{n}.conv_k.weight" - vk = f"enc_p.encoder.attn_layers.{n}.conv_v.weight" - atoq, atok, atov = model[qk][:, :, 0], model[uk][:, :, 0], model[vk][:, :, 0] - - attn = cal_cross_attn(atoq, atok, atov, input) - return attn - - -def main(path, root): - torch.manual_seed(114514) - model_a = torch.load(path, map_location="cpu")["weight"] - - logger.info("Query:\t\t%s\t%s" % (path, model_hash(path))) - - map_attn_a = {} - map_rand_input = {} - for n in range(6): - hidden_dim, embed_dim, _ = model_a[ - f"enc_p.encoder.attn_layers.{n}.conv_v.weight" - ].shape - rand_input = torch.randn([embed_dim, hidden_dim]) - - map_attn_a[n] = eval(model_a, n, rand_input) - map_rand_input[n] = rand_input - - del model_a - - for name in sorted(list(os.listdir(root))): - path = "%s/%s" % (root, name) - model_b = torch.load(path, map_location="cpu")["weight"] - - sims = [] - for n in range(6): - attn_a = map_attn_a[n] - attn_b = eval(model_b, n, map_rand_input[n]) - - sim = torch.mean(torch.cosine_similarity(attn_a, attn_b)) - sims.append(sim) - - logger.info( - "Reference:\t%s\t%s\t%s" - % (path, model_hash(path), f"{torch.mean(torch.stack(sims)) * 1e2:.2f}%") - ) - - -if __name__ == "__main__": - query_path = r"assets\weights\mi v3.pth" - reference_root = r"assets\weights" - main(query_path, reference_root) diff --git a/spaces/rachana219/MODT2/trackers/strongsort/utils/parser.py b/spaces/rachana219/MODT2/trackers/strongsort/utils/parser.py deleted file mode 100644 index c29ed84479c6a7b8bc7148f3aac8941c7b261c3d..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/strongsort/utils/parser.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import yaml -from easydict import EasyDict as edict - - -class YamlParser(edict): - """ - This is yaml parser based on EasyDict. 
- """ - - def __init__(self, cfg_dict=None, config_file=None): - if cfg_dict is None: - cfg_dict = {} - - if config_file is not None: - assert(os.path.isfile(config_file)) - with open(config_file, 'r') as fo: - yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader) - cfg_dict.update(yaml_) - - super(YamlParser, self).__init__(cfg_dict) - - def merge_from_file(self, config_file): - with open(config_file, 'r') as fo: - yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader) - self.update(yaml_) - - def merge_from_dict(self, config_dict): - self.update(config_dict) - - -def get_config(config_file=None): - return YamlParser(config_file=config_file) - - -if __name__ == "__main__": - cfg = YamlParser(config_file="../configs/yolov3.yaml") - cfg.merge_from_file("../configs/strong_sort.yaml") - - import ipdb - ipdb.set_trace() diff --git a/spaces/radames/Candle-Phi-1.5-Wasm/README.md b/spaces/radames/Candle-Phi-1.5-Wasm/README.md deleted file mode 100644 index 2783eed27cfedb663bd93fac9ceb82f3b100c3ab..0000000000000000000000000000000000000000 --- a/spaces/radames/Candle-Phi-1.5-Wasm/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Candle Phi 1.5 Wasm -emoji: 🕯️ɸ -colorFrom: gray -colorTo: purple -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/radames/MusicGen-Continuation/tests/common_utils/wav_utils.py b/spaces/radames/MusicGen-Continuation/tests/common_utils/wav_utils.py deleted file mode 100644 index d3a563ee1749a58217ece55c9a08b8d93c0fc386..0000000000000000000000000000000000000000 --- a/spaces/radames/MusicGen-Continuation/tests/common_utils/wav_utils.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from pathlib import Path -import typing as tp - -import torch -import torchaudio - - -def get_white_noise(chs: int = 1, num_frames: int = 1): - wav = torch.randn(chs, num_frames) - return wav - - -def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1): - wav = torch.randn(bs, chs, num_frames) - return wav - - -def save_wav(path: str, wav: torch.Tensor, sample_rate: int): - fp = Path(path) - kwargs: tp.Dict[str, tp.Any] = {} - if fp.suffix == '.wav': - kwargs['encoding'] = 'PCM_S' - kwargs['bits_per_sample'] = 16 - elif fp.suffix == '.mp3': - kwargs['compression'] = 320 - torchaudio.save(str(fp), wav, sample_rate, **kwargs) diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Carminat V32 2 Download Uptobox.md b/spaces/raedeXanto/academic-chatgpt-beta/Carminat V32 2 Download Uptobox.md deleted file mode 100644 index 5f242424f466631038c900058e8cfd13571e45eb..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Carminat V32 2 Download Uptobox.md +++ /dev/null @@ -1,183 +0,0 @@ -
    -

    Carminat V32 2 Download Uptobox: How to Update Your Renault Navigation System

    -

    If you own a Renault vehicle with a navigation system, you might be interested in updating it with the latest map data and features. One of the most recent updates available is Carminat V32 2, which offers improved road coverage, points of interest, traffic message coverage, and menu design. In this article, we will show you how to download Carminat V32 2 from Uptobox, a popular file hosting service, and how to install it on your Renault navigation system. We will also explain how to use Carminat V32 2 and what benefits it can bring to your driving experience.

    -

    -

    What is Carminat V32 2 and what are its features?

    -

    Carminat V32 2 is a DVD-based map update for Renault vehicles equipped with the Carminat Navigation Communication (CNC) system. This system is also known as Xanavi X7 or Birdview in some Nissan models. It is a multimedia device that combines audio, navigation, communication, and entertainment functions. It has a color screen, a joystick, a CD/DVD player, a Bluetooth connection, and an SD card slot.

    -

    Carminat V32 2 is the latest version of the map data for this system, released in 2014. It contains the following features:

    -
      -
    • 1,953,566 km of road coverage in Europe
    • -
    • 9,033 points of interest such as petrol stations, airports, hotels, and restaurants
    • -
    • Improved traffic message coverage in some countries
    • -
    • An improved menu design with a new map display
    • -
    -

    With Carminat V32 2, you can enjoy more accurate and up-to-date navigation information, as well as a more user-friendly interface.

    -

    What is Uptobox and why use it to download Carminat V32 2?

    -

    Uptobox is a file hosting service that allows users to upload and download files online. It offers free and premium accounts, with different features and limitations. Uptobox is one of the most popular file hosting services in France, but it also has users from other countries.

    -

    -

    Uptobox is a good option to download Carminat V32 2 for several reasons:

    -
      -
    • It has high-speed servers that can provide fast downloads
    • -
    • It supports various file formats, including ISO, which is the format of Carminat V32 2
    • -
    • It has a simple and intuitive interface that makes it easy to use
    • -
    • It has a large storage capacity and a long retention time for files
    • -
    • It has a premium link generator that can bypass the download limits and waiting times for free users
    -

    However, Uptobox also has some drawbacks, such as:

    -
      -
    • It may have some ads and pop-ups that can be annoying or harmful
    • -
    • It may have some broken or fake links that can waste your time or damage your device
    • -
    • It may have some legal issues or restrictions in some countries that can affect your access or privacy
    • -
    -

    Therefore, you should always be careful and cautious when using Uptobox or any other file hosting service. You should also use a VPN, an antivirus, and a download manager to protect yourself and enhance your download experience.

    -

    How to download Carminat V32 2 from Uptobox

    -

    Now that you know what Carminat V32 2 and Uptobox are, let's see how to download the update from the file hosting service. Here are the steps you need to follow:

    -

    Step 1: Find a reliable source of Carminat V32 2 on Uptobox

    -

    The first thing you need to do is to find a trustworthy, working link to Carminat V32 2 on Uptobox. You can search for it on Google or other search engines, or you can use forums and blogs that specialize in navigation updates. Some of them provide a direct link to Carminat V32 2 on Uptobox, as well as instructions and screenshots.

    -

    However, you should always verify the source and the link before downloading anything. You should check the comments, the ratings, the file size, and the file name. You should also scan the link with an online tool such as VirusTotal to make sure it is safe and clean.
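    If the uploader has published a checksum (for example an MD5 or SHA-256 hash) alongside the link, it is also worth comparing it against a hash you compute locally once the download has finished. Below is a minimal Python sketch using only the standard library; the file name and the expected digest are placeholders, not real values published for Carminat V32 2:

import hashlib

def sha256_of_file(path, chunk_size=1024 * 1024):
    # Hash the file in 1 MiB chunks so even a multi-GB ISO never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    iso_path = "CARMINAT_V32_2.iso"  # placeholder file name
    expected = "paste-the-checksum-published-by-the-uploader-here"  # placeholder value
    actual = sha256_of_file(iso_path)
    print("computed:", actual)
    print("match" if actual == expected.lower() else "NO MATCH - do not burn or install this file")

    If the two digests differ, the file was corrupted during the download or is not the file the uploader described, and it should not be burned or installed.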

    -

    Step 2: Register or log in to Uptobox and get the download link

    -

    The next thing you need to do is to register or log in to Uptobox and get the download link of Carminat V32 2. You can create a free account on Uptobox by entering your email address and a password, or you can use your Facebook or Google account. Alternatively, you can use a premium account if you have one, or you can buy one with a credit card or PayPal.

    -

    Once you have an account on Uptobox, you can access the download link of Carminat V32 2 by clicking on it or copying and pasting it into your browser. You will see a page with some information about the file, such as its name, size, date, and description. You will also see a button that says "Download". If you are a free user, you will have to wait a few seconds before the button becomes clickable. If you are a premium user, you can click on it right away.

    -

    Step 3: Download Carminat V32 2 using a download manager or a browser

    -

    The last thing you need to do is to download Carminat V32 2 using a download manager or a browser. A download manager is a program that can speed up your downloads, resume them if they are interrupted, and manage them more efficiently. Some examples of download managers are Internet Download Manager, Free Download Manager, and JDownloader. A browser is a program that lets you access the internet and view web pages. Some examples of browsers are Google Chrome, Mozilla Firefox, and Microsoft Edge.

    -

    To download Carminat V32 2 using a download manager, you need to copy the download link from Uptobox and paste it into the download manager. Then, you need to choose a destination folder for the file and start the download. To download Carminat V32 2 using a browser, you need to click on the download button from Uptobox and choose a destination folder for the file. Then, you need to wait for the download to finish.

    -

    The file size of Carminat V32 2 is about 7 GB, so it may take some time depending on your internet speed and connection. You should also make sure that you have enough space on your device and that your device is plugged in or has enough battery.
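    If you prefer to script the download instead of using a browser or a download manager, the rough Python sketch below shows the general pattern: check the free disk space first, then stream the file to disk in small chunks so the 7 GB image never has to fit in memory. It assumes you already have a direct download link (the URL shown is a placeholder, not a real Uptobox link) and that the third-party requests library is installed:

import os
import shutil

import requests  # third-party: pip install requests

def download_large_file(url, dest, min_free_gb=8.0):
    # Refuse to start if the destination drive has less free space than min_free_gb.
    dest_dir = os.path.dirname(os.path.abspath(dest))
    free_gb = shutil.disk_usage(dest_dir).free / 1024**3
    if free_gb < min_free_gb:
        raise RuntimeError(f"Only {free_gb:.1f} GB free in {dest_dir}; need at least {min_free_gb} GB")

    # Stream the response in 1 MiB chunks and show rough progress.
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        written = 0
        with open(dest, "wb") as f:
            for chunk in resp.iter_content(chunk_size=1024 * 1024):
                f.write(chunk)
                written += len(chunk)
                print(f"\r{written / 1024**2:.0f} MiB written", end="")
    print("\nSaved to", dest)

# The URL below is a placeholder: Uptobox only reveals the real link after you
# click the Download button (or generate it with a premium account).
# download_large_file("https://example.com/CARMINAT_V32_2.iso", "CARMINAT_V32_2.iso")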

    -

    How to install Carminat V32 2 on your Renault navigation system

    -

    After downloading Carminat V32 2 from Uptobox, you need to install it on your Renault navigation system. Here are the steps you need to follow:

    -

    Step 1: Prepare your vehicle and your navigation system for the update

    -

    Before installing Carminat V32 2 on your Renault navigation system, you need to prepare your vehicle and your navigation system for the update. You need to do the following:

    -
      -
    • Make sure that your vehicle is parked in a safe and secure place, with the engine running and the handbrake on
    • -
    • Make sure that your navigation system is turned on and working properly
    • -
    • Make sure that you have the original CD of your navigation system, as you will need it for the update
    • -
    • Make sure that you have a blank CD-R or CD-RW, as you will need it to burn the update file
    • -
    • Make sure that you have a computer with a CD/DVD burner, as you will need it to burn the update discs
    • -
    -

    If you have everything ready, you can proceed to the next step.

    -

    Step 2: Insert the update CD and follow the instructions on the screen

    -

    The first part of the installation process is to insert the update CD and follow the instructions on the screen. You need to do the following:

    -
      -
    • Burn the update file that you downloaded from Uptobox onto a blank CD-R or CD-RW using a computer with a CD/DVD burner. You can use any software that can burn ISO files, such as Nero, ImgBurn, or PowerISO. Make sure that you burn the file as an image, not as data, and that you finalize the disc after burning (a quick way to double-check the burn is sketched just after this list).
    • -
    • Eject the original CD of your navigation system from your vehicle's CD/DVD player and keep it in a safe place
    • -
    • Insert the update CD that you burned into your vehicle's CD/DVD player and wait for it to be recognized
    • -
    • Follow the instructions on the screen of your navigation system. You will see a message that says "Update in progress. Do not remove disc". The update will take about 10 minutes.
    • -
    • When the update is finished, you will see a message that says "Update completed. Please remove disc". Eject the update CD from your vehicle's CD/DVD player and keep it in a safe place
    • -
    -
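    Before moving on, it can be worth double-checking that the disc really contains a byte-for-byte copy of the image, rather than a data disc with the ISO file stored on it. One low-tech way is to hash the image and then hash the same number of bytes read back from the drive. The Python sketch below is only an illustration: the device path /dev/sr0 is a typical Linux optical drive and the file name is a placeholder, so adjust both for your system (on Windows, raw access to the drive works differently):

import hashlib
import os

def sha256_of_stream(stream, total_bytes, chunk_size=1024 * 1024):
    # Hash exactly the first total_bytes bytes of an already opened binary stream.
    digest = hashlib.sha256()
    remaining = total_bytes
    while remaining > 0:
        data = stream.read(min(chunk_size, remaining))
        if not data:
            break
        digest.update(data)
        remaining -= len(data)
    return digest.hexdigest()

iso_path = "CARMINAT_V32_2.iso"  # placeholder file name
device = "/dev/sr0"              # typical Linux optical drive; adjust for your system

image_size = os.path.getsize(iso_path)
with open(iso_path, "rb") as image:
    image_hash = sha256_of_stream(image, image_size)
with open(device, "rb") as disc:  # reading the raw device may require elevated privileges
    disc_hash = sha256_of_stream(disc, image_size)

print("burn verified" if image_hash == disc_hash else "disc content does not match the image")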

    If everything went well, you can proceed to the next step.

    -

    Step 3: Insert the update DVD and wait for the installation to complete

    -

    The second part of the installation process is to insert the update DVD and wait for the installation to complete. You need to do the following:

    -
      -
    • Burn the update DVD image that you downloaded from Uptobox onto a blank DVD-R or DVD-RW using a computer with a DVD burner. You can use any software that can burn ISO files, such as Nero, ImgBurn, or PowerISO. Make sure that you burn the file as an image, not as data, and that you finalize the disc after burning.
    • -
    • Insert the update DVD that you burned into your vehicle's CD/DVD player and wait for it to be recognized
    • -
    • The installation will start automatically. You will see a message that says "Installation in progress. Do not remove disc". The installation will take about 30 minutes.
    • -
    • When the installation is finished, you will see a message that says "Installation completed. Please remove disc". Eject the update DVD from your vehicle's CD/DVD player and keep it in a safe place
    • -
    -

    Congratulations! You have successfully installed Carminat V32 2 on your Renault navigation system.

    -

    How to use Carminat V32 2 on your Renault navigation system

    -

    After installing Carminat V32 2 on your Renault navigation system, you can start using it and enjoy its features. Here are some tips on how to use Carminat V32 2 on your Renault navigation system:

    -

    How to access the main menu and the different functions

    -

    To access the main menu of Carminat V32 2, you need to press the MENU button on the joystick of your navigation system. You will see a screen with four icons: Navigation, Audio, Communication, and Settings. You can use the joystick to select the icon you want and press ENTER to confirm. You can also use the buttons on the steering wheel or the voice command to access the main menu and the different functions.

    -

    How to enter a destination and start navigation

    -

    To enter a destination and start navigation, you need to do the following:

    -
      -
    • Select the Navigation icon from the main menu and press ENTER
    • -
    • Select the Destination icon from the navigation menu and press ENTER
    • -
    • Select the method you want to enter your destination, such as Address, Point of Interest, Favourites, or Last Destinations, and press ENTER
    • -
    • Enter your destination using the joystick, the keyboard on the screen, or the voice command, and press ENTER
    • -
    • Select the route option you prefer, such as Fastest, Shortest, or Economical, and press ENTER
    • -
    • Press START to begin navigation. You will see a map with your current position and your destination, as well as some information such as distance, time, and speed. You will also hear voice guidance that will tell you when and where to turn.
    • -
    -

    To stop navigation, you need to press STOP on the screen or say "Stop navigation" using the voice command.

    -

    How to use the points of interest and the speed camera alerts

    -

    Carminat V32 2 has a database of points of interest (POIs) that can help you find useful places near your location or your destination. You can search for POIs by category, such as Restaurants, Hotels, Petrol Stations, or Parking. You can also see POIs on the map as icons.

    -

    To use the points of interest, you need to do the following:

    -
      -
    • Select the Navigation icon from the main menu and press ENTER
    • -
    • Select the Point of Interest icon from the navigation menu and press ENTER
    • -
    • Select the method you want to search for POIs, such as Near Current Position, Near Destination, or By Name, and press ENTER
    • -
    • Select the category of POIs you want to see, such as Restaurants, Hotels, Petrol Stations, or Parking, and press ENTER
    • -
    • Select the POI you want to go to from the list and press ENTER
    • -
    • Press START to begin navigation. You will see a map with your current position and your selected POI, as well as some information such as distance, time, and speed. You will also hear voice guidance that will tell you when and where to turn.
    • -
    -

    Carminat V32 2 also has a database of speed camera locations that can alert you when you approach a speed camera. You can see speed camera icons on the map and hear a warning sound when you are near one. You can also see your current speed limit on the screen.

    -

    To use the speed camera alerts, you need to do the following:

    -
      -
    • Select the Settings icon from the main menu and press ENTER
    • -
    • Select the Navigation Settings icon from the settings menu and press ENTER
    • -
    • Select the Speed Camera Alerts option from the navigation settings menu and press ENTER
    • -
    • Select ON or OFF to enable or disable the speed camera alerts and press ENTER
    • -
    -

    Note that speed camera alerts may not be available or accurate in some countries or regions due to legal or technical reasons.

    -

    Conclusion

    -

    In this article, we have shown you how to download Carminat V32 2 from Uptobox and how to install it on your Renault navigation system. We have also explained how to use Carminat V32 2 and what benefits it can bring to your driving experience.

    -

    Carminat V32 2 is a DVD-based map update for Renault vehicles equipped with the Carminat Navigation Communication (CNC) system. It offers improved road coverage, points of interest, traffic message coverage, and menu design. It can help you navigate more accurately and efficiently, as well as enjoy a more user-friendly interface.

    -

    Uptobox is a file hosting service that allows users to upload and download files online. It offers high-speed servers, various file formats, a simple and intuitive interface, a large storage capacity, and a premium link generator. It can help you download Carminat V32 2 faster and easier, as well as access other files online.

    -

    However, you should always be careful and cautious when using Uptobox or any other file hosting service. You should always verify the source and the link before downloading anything. You should also use a VPN, an antivirus, and a download manager to protect yourself and enhance your download experience.

    -

    We hope that this article has been helpful and informative for you. If you have any questions or comments, please feel free to contact us. We would love to hear from you.

    -

    FAQs

    -

    Here are some frequently asked questions about Carminat V32 2 and Uptobox:

    -

    What are the compatible models for Carminat V32 2?

    -

    Carminat V32 2 is compatible with the following Renault models:

    -
      -
    • Clio III (from 2006 to 2009)
    • -
    • Espace IV (from 2006 to 2010)
    • -
    • Koleos (from 2008 to 2010)
    • -
    • Laguna II (from 2006 to 2007)
    • -
    • Laguna III (from 2007 to 2010)
    • -
    • Megane II (from 2006 to 2008)
    • -
    • Megane III (from 2008 to 2010)
    • -
    • Scenic II (from 2006 to 2009)
    • -
    • Scenic III (from 2009 to 2010)
    • -
    • Trafic II (from 2006 to 2010)
    • -
    • Vel Satis (from 2006 to 2010)
    • -
    -

    What are the countries covered by Carminat V32 2?

    -

    Carminat V32 2 covers the following countries in Europe:

    -
      -
    • Austria
    • -
    • Belgium
    • -
    • Czech Republic
    • -
    • Denmark
    • -
    • Finland
    • -
    • France
    • -
    • Germany
    • -
    • Greece
    • -
    • Hungary
    • -
    • Ireland
    • -
    • Italy
    • -
    • Luxembourg
    • -
    • Netherlands
    • -
    • Norway
    • -
    • Poland
    • -
    • Portugal
    • -
    • Slovakia
    • -
    • Slovenia
    • -
    • Spain
    • -
    • Sweden
    • -
    • Switzerland
    • -
    • United Kingdom
    • -
    -

    How often should I update my navigation system with Carminat V32 2?

    -

    You should update your navigation system's map data at least once a year, or whenever a version newer than Carminat V32 2 becomes available. This way, you can ensure that your navigation system has the most recent and accurate map data and features.

    -

    What if I encounter any problems with downloading or installing Carminat V32 2?

    -

    If you encounter any problems with downloading or installing Carminat V32 2, you should do the following:

    -
      -
    • Check your internet connection and speed, and make sure that they are stable and sufficient for downloading large files
    • -
    • Check your device's storage space and battery level, and make sure that they are enough for downloading and installing large files
    • -
    • Check your download manager's settings and preferences, and make sure that they are compatible with Uptobox and ISO files
    • -
    • Check the settings of your disc-burning software and your computer's DVD drive, and make sure that they can burn ISO files correctly
    • -
    • Check your vehicle's engine, handbrake, and navigation system, and make sure that they are ready for the update process
    • -
    • Contact the source or the provider of Carminat V32 2 on Uptobox, and ask for their assistance or guidance
    • -
    • Contact Renault's customer service or technical support, and ask for their assistance or guidance
    • Contact Uptobox's customer service or technical support, and ask for their assistance or guidance
    • -
    -

    Do not give up or get frustrated if you face any difficulties. Remember that updating your navigation system with Carminat V32 2 is worth the effort, as it can enhance your driving safety and comfort.

    -

    Where can I find more information about Carminat V32 2 and Uptobox?

    -

    If you want to find more information about Carminat V32 2 and Uptobox, you can visit the following websites:

    -
      -
    • The official website of Renault, where you can find information about your vehicle, your navigation system, and the latest updates available
    • -
    • The official website of Uptobox, where you can find information about the file hosting service, its features, its terms and conditions, and its contact details
    • -
    • The official website of TomTom, where you can find information about the map provider, its products, its services, and its support
    • -
    • The official website of Navteq, where you can find information about the traffic message provider, its products, its services, and its support
    • -
    • The official website of HERE Technologies, where you can find information about the points of interest provider, its products, its services, and its support
    • -
    -

    You can also consult some online forums or blogs that specialize in navigation updates, such as GPS Underground, GPS Power, or GPS Forum. You can find useful tips, reviews, feedback, and links from other users who have downloaded or installed Carminat V32 2.

    -

    -


    -
    -
    \ No newline at end of file diff --git a/spaces/rainy3/chatgpt_academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp b/spaces/rainy3/chatgpt_academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp deleted file mode 100644 index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000 --- a/spaces/rainy3/chatgpt_academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp +++ /dev/null @@ -1,3276 +0,0 @@ -// jpgd.cpp - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -// Last updated Apr. 16, 2011 -// Alex Evans: Linear memory allocator (taken from jpge.h). -// -// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2. -// -// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling. -// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain" -// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html - -#include "jpgd.h" -#include - -#include -// BEGIN EPIC MOD -#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0 -// END EPIC MOD - -#ifdef _MSC_VER -#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable -#endif - -// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling). -// This is slower, but results in higher quality on images with highly saturated colors. -#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1 - -#define JPGD_TRUE (1) -#define JPGD_FALSE (0) - -#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b)) -#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b)) - -namespace jpgd { - - static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); } - static inline void jpgd_free(void *p) { FMemory::Free(p); } - -// BEGIN EPIC MOD -//@UE3 - use UE3 BGRA encoding instead of assuming RGBA - // stolen from IImageWrapper.h - enum ERGBFormatJPG - { - Invalid = -1, - RGBA = 0, - BGRA = 1, - Gray = 2, - }; - static ERGBFormatJPG jpg_format; -// END EPIC MOD - - // DCT coefficients are stored in this sequence. 
- static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; - - enum JPEG_MARKER - { - M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8, - M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC, - M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7, - M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF, - M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0 - }; - - enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 }; - -#define CONST_BITS 13 -#define PASS1_BITS 2 -#define SCALEDONE ((int32)1) - -#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */ -#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */ - -#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n)) -#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n)) - -#define MULTIPLY(var, cnst) ((var) * (cnst)) - -#define CLAMP(i) ((static_cast(i) > 255) ? (((~i) >> 31) & 0xFF) : (i)) - - // Compiler creates a fast path 1D IDCT for X non-zero columns - template - struct Row - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - // ACCESS_COL() will be optimized at compile time to either an array access, or 0. -#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? 
(int)pSrc[x] : 0) - - const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS; - const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS); - pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS); - pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS); - pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS); - pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS); - pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS); - pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS); - pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS); - } - }; - - template <> - struct Row<0> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { -#ifdef _MSC_VER - pTemp; pSrc; -#endif - } - }; - - template <> - struct Row<1> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - const int dcval = (pSrc[0] << PASS1_BITS); - - pTemp[0] = dcval; - pTemp[1] = dcval; - pTemp[2] = dcval; - pTemp[3] = dcval; - pTemp[4] = dcval; - pTemp[5] = dcval; - pTemp[6] = dcval; - pTemp[7] = dcval; - } - }; - - // Compiler creates a fast path 1D IDCT for X non-zero rows - template - struct Col - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - // ACCESS_ROW() will be optimized at compile time to either an array access, or 0. -#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? 
pTemp[x * 8] : 0) - - const int z2 = ACCESS_ROW(2); - const int z3 = ACCESS_ROW(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS; - const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*0] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*7] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*1] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*6] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*2] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*5] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*3] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*4] = (uint8)CLAMP(i); - } - }; - - template <> - struct Col<1> - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3); - const uint8 dcval_clamped = (uint8)CLAMP(dcval); - pDst_ptr[0*8] = dcval_clamped; - pDst_ptr[1*8] = dcval_clamped; - pDst_ptr[2*8] = dcval_clamped; - pDst_ptr[3*8] = dcval_clamped; - pDst_ptr[4*8] = dcval_clamped; - pDst_ptr[5*8] = dcval_clamped; - pDst_ptr[6*8] = dcval_clamped; - pDst_ptr[7*8] = dcval_clamped; - } - }; - - static const uint8 s_idct_row_table[] = - { - 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0, - 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0, - 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0, - 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0, - 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2, - 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2, - 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4, - 
8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8, - }; - - static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; - - void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag) - { - JPGD_ASSERT(block_max_zag >= 1); - JPGD_ASSERT(block_max_zag <= 64); - - if (block_max_zag == 1) - { - int k = ((pSrc_ptr[0] + 4) >> 3) + 128; - k = CLAMP(k); - k = k | (k<<8); - k = k | (k<<16); - - for (int i = 8; i > 0; i--) - { - *(int*)&pDst_ptr[0] = k; - *(int*)&pDst_ptr[4] = k; - pDst_ptr += 8; - } - return; - } - - int temp[64]; - - const jpgd_block_t* pSrc = pSrc_ptr; - int* pTemp = temp; - - const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8]; - int i; - for (i = 8; i > 0; i--, pRow_tab++) - { - switch (*pRow_tab) - { - case 0: Row<0>::idct(pTemp, pSrc); break; - case 1: Row<1>::idct(pTemp, pSrc); break; - case 2: Row<2>::idct(pTemp, pSrc); break; - case 3: Row<3>::idct(pTemp, pSrc); break; - case 4: Row<4>::idct(pTemp, pSrc); break; - case 5: Row<5>::idct(pTemp, pSrc); break; - case 6: Row<6>::idct(pTemp, pSrc); break; - case 7: Row<7>::idct(pTemp, pSrc); break; - case 8: Row<8>::idct(pTemp, pSrc); break; - } - - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - - const int nonzero_rows = s_idct_col_table[block_max_zag - 1]; - for (i = 8; i > 0; i--) - { - switch (nonzero_rows) - { - case 1: Col<1>::idct(pDst_ptr, pTemp); break; - case 2: Col<2>::idct(pDst_ptr, pTemp); break; - case 3: Col<3>::idct(pDst_ptr, pTemp); break; - case 4: Col<4>::idct(pDst_ptr, pTemp); break; - case 5: Col<5>::idct(pDst_ptr, pTemp); break; - case 6: Col<6>::idct(pDst_ptr, pTemp); break; - case 7: Col<7>::idct(pDst_ptr, pTemp); break; - case 8: Col<8>::idct(pDst_ptr, pTemp); break; - } - - pTemp++; - pDst_ptr++; - } - } - - void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr) - { - int temp[64]; - int* pTemp = temp; - const jpgd_block_t* pSrc = pSrc_ptr; - - for (int i = 4; i > 0; i--) - { - Row<4>::idct(pTemp, pSrc); - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - for (int i = 8; i > 0; i--) - { - Col<4>::idct(pDst_ptr, pTemp); - pTemp++; - pDst_ptr++; - } - } - - // Retrieve one character from the input stream. - inline uint jpeg_decoder::get_char() - { - // Any bytes remaining in buffer? - if (!m_in_buf_left) - { - // Try to get more bytes. - prep_in_buffer(); - // Still nothing to get? - if (!m_in_buf_left) - { - // Pad the end of the stream with 0xFF 0xD9 (EOI marker) - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Same as previous method, except can indicate if the character is a pad character or not. - inline uint jpeg_decoder::get_char(bool *pPadding_flag) - { - if (!m_in_buf_left) - { - prep_in_buffer(); - if (!m_in_buf_left) - { - *pPadding_flag = true; - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - *pPadding_flag = false; - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Inserts a previously retrieved character back into the input buffer. 
- inline void jpeg_decoder::stuff_char(uint8 q) - { - *(--m_pIn_buf_ofs) = q; - m_in_buf_left++; - } - - // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered. - inline uint8 jpeg_decoder::get_octet() - { - bool padding_flag; - int c = get_char(&padding_flag); - - if (c == 0xFF) - { - if (padding_flag) - return 0xFF; - - c = get_char(&padding_flag); - if (padding_flag) - { - stuff_char(0xFF); - return 0xFF; - } - - if (c == 0x00) - return 0xFF; - else - { - stuff_char(static_cast(c)); - stuff_char(0xFF); - return 0xFF; - } - } - - return static_cast(c); - } - - // Retrieves a variable number of bits from the input stream. Does not recognize markers. - inline uint jpeg_decoder::get_bits(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - uint c1 = get_char(); - uint c2 = get_char(); - m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2; - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered. - inline uint jpeg_decoder::get_bits_no_markers(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF)) - { - uint c1 = get_octet(); - uint c2 = get_octet(); - m_bit_buf |= (c1 << 8) | c2; - } - else - { - m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1]; - m_in_buf_left -= 2; - m_pIn_buf_ofs += 2; - } - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0) - { - // Decode more bits, use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - } - else - get_bits_no_markers(pH->code_size[symbol]); - - return symbol; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0) - { - // Use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - - extra_bits = get_bits_no_markers(symbol & 0xF); - } - else - { - JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? 
(symbol & 15) : 0)); - - if (symbol & 0x8000) - { - get_bits_no_markers((symbol >> 8) & 31); - extra_bits = symbol >> 16; - } - else - { - int code_size = (symbol >> 8) & 31; - int num_extra_bits = symbol & 0xF; - int bits = code_size + num_extra_bits; - if (bits <= (m_bits_left + 16)) - extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1); - else - { - get_bits_no_markers(code_size); - extra_bits = get_bits_no_markers(num_extra_bits); - } - } - - symbol &= 0xFF; - } - - return symbol; - } - - // Tables and macro used to fully decode the DPCM differences. - static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; - static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 }; - static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) }; -#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x)) - - // Clamps a value between 0-255. - inline uint8 jpeg_decoder::clamp(int i) - { - if (static_cast(i) > 255) - i = (((~i) >> 31) & 0xFF); - - return static_cast(i); - } - - namespace DCT_Upsample - { - struct Matrix44 - { - typedef int Element_Type; - enum { NUM_ROWS = 4, NUM_COLS = 4 }; - - Element_Type v[NUM_ROWS][NUM_COLS]; - - inline int rows() const { return NUM_ROWS; } - inline int cols() const { return NUM_COLS; } - - inline const Element_Type & at(int r, int c) const { return v[r][c]; } - inline Element_Type & at(int r, int c) { return v[r][c]; } - - inline Matrix44() { } - - inline Matrix44& operator += (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) += a.at(r, 0); - at(r, 1) += a.at(r, 1); - at(r, 2) += a.at(r, 2); - at(r, 3) += a.at(r, 3); - } - return *this; - } - - inline Matrix44& operator -= (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) -= a.at(r, 0); - at(r, 1) -= a.at(r, 1); - at(r, 2) -= a.at(r, 2); - at(r, 3) -= a.at(r, 3); - } - return *this; - } - - friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) + b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) + b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) + b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) + b.at(r, 3); - } - return ret; - } - - friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) - b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) - b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) - b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) - b.at(r, 3); - } - return ret; - } - - static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) + b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) + b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) + b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 3) + b.at(r, 3)); - } - } - - static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) - b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) - b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) - b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 
3) - b.at(r, 3)); - } - } - }; - - const int FRACT_BITS = 10; - const int SCALE = 1 << FRACT_BITS; - - typedef int Temp_Type; -#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS) -#define F(i) ((int)((i) * SCALE + .5f)) - - // Any decent C++ compiler will optimize this at compile time to a 0, or an array access. -#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8]) - - // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix - template - struct P_Q - { - static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X000 = AT(0, 0); - const Temp_Type X001 = AT(0, 1); - const Temp_Type X002 = AT(0, 2); - const Temp_Type X003 = AT(0, 3); - const Temp_Type X004 = AT(0, 4); - const Temp_Type X005 = AT(0, 5); - const Temp_Type X006 = AT(0, 6); - const Temp_Type X007 = AT(0, 7); - const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0)); - const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1)); - const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2)); - const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3)); - const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4)); - const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5)); - const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6)); - const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7)); - const Temp_Type X020 = AT(4, 0); - const Temp_Type X021 = AT(4, 1); - const Temp_Type X022 = AT(4, 2); - const Temp_Type X023 = AT(4, 3); - const Temp_Type X024 = AT(4, 4); - const Temp_Type X025 = AT(4, 5); - const Temp_Type X026 = AT(4, 6); - const Temp_Type X027 = AT(4, 7); - const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0)); - const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1)); - const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2)); - const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3)); - const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4)); - const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5)); - const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6)); - const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7)); - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - P.at(0, 0) = X000; - P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f)); - P.at(0, 2) = X004; - P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * 
F(0.490393f) + X007 * F(0.865723f)); - P.at(1, 0) = X010; - P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f)); - P.at(1, 2) = X014; - P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f)); - P.at(2, 0) = X020; - P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f)); - P.at(2, 2) = X024; - P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f)); - P.at(3, 0) = X030; - P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f)); - P.at(3, 2) = X034; - P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f)); - // 40 muls 24 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f)); - Q.at(0, 1) = X002; - Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f)); - Q.at(0, 3) = X006; - Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f)); - Q.at(1, 1) = X012; - Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f)); - Q.at(1, 3) = X016; - Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f)); - Q.at(2, 1) = X022; - Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f)); - Q.at(2, 3) = X026; - Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f)); - Q.at(3, 1) = X032; - Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f)); - Q.at(3, 3) = X036; - // 40 muls 24 adds - } - }; - - template - struct R_S - { - static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0)); - const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1)); - const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2)); - const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3)); - const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4)); - const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5)); - const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6)); - const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7)); - const Temp_Type X110 = AT(2, 0); - const Temp_Type X111 = AT(2, 1); - const Temp_Type X112 = AT(2, 2); - const Temp_Type X113 = AT(2, 3); - const Temp_Type X114 = AT(2, 4); - const Temp_Type X115 = AT(2, 5); - const Temp_Type X116 = AT(2, 6); - const Temp_Type X117 = AT(2, 7); - const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0)); - 
const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1)); - const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2)); - const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3)); - const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4)); - const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5)); - const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6)); - const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7)); - const Temp_Type X130 = AT(6, 0); - const Temp_Type X131 = AT(6, 1); - const Temp_Type X132 = AT(6, 2); - const Temp_Type X133 = AT(6, 3); - const Temp_Type X134 = AT(6, 4); - const Temp_Type X135 = AT(6, 5); - const Temp_Type X136 = AT(6, 6); - const Temp_Type X137 = AT(6, 7); - // 80 muls 48 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - R.at(0, 0) = X100; - R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f)); - R.at(0, 2) = X104; - R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f)); - R.at(1, 0) = X110; - R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f)); - R.at(1, 2) = X114; - R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f)); - R.at(2, 0) = X120; - R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f)); - R.at(2, 2) = X124; - R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f)); - R.at(3, 0) = X130; - R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f)); - R.at(3, 2) = X134; - R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f)); - // 40 muls 24 adds - // 4x4 = 4x8 times 8x4, matrix 1 is constant - S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f)); - S.at(0, 1) = X102; - S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f)); - S.at(0, 3) = X106; - S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f)); - S.at(1, 1) = X112; - S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f)); - S.at(1, 3) = X116; - S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f)); - S.at(2, 1) = X122; - S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f)); - S.at(2, 3) = X126; - S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f)); - S.at(3, 1) = X132; - S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f)); - S.at(3, 3) = X136; - // 40 muls 24 adds - } - }; - } // end namespace DCT_Upsample - - // Unconditionally frees all allocated m_blocks. 
- void jpeg_decoder::free_all_blocks() - { - m_pStream = NULL; - for (mem_block *b = m_pMem_blocks; b; ) - { - mem_block *n = b->m_pNext; - jpgd_free(b); - b = n; - } - m_pMem_blocks = NULL; - } - - // This method handles all errors. - // It could easily be changed to use C++ exceptions. - void jpeg_decoder::stop_decoding(jpgd_status status) - { - m_error_code = status; - free_all_blocks(); - longjmp(m_jmp_state, status); - - // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit - // that this function doesn't return, otherwise we get this error: - // - // error : function declared 'noreturn' should not return - exit(1); - } - - void *jpeg_decoder::alloc(size_t nSize, bool zero) - { - nSize = (JPGD_MAX(nSize, 1) + 3) & ~3; - char *rv = NULL; - for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext) - { - if ((b->m_used_count + nSize) <= b->m_size) - { - rv = b->m_data + b->m_used_count; - b->m_used_count += nSize; - break; - } - } - if (!rv) - { - int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047); - mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity); - if (!b) stop_decoding(JPGD_NOTENOUGHMEM); - b->m_pNext = m_pMem_blocks; m_pMem_blocks = b; - b->m_used_count = nSize; - b->m_size = capacity; - rv = b->m_data; - } - if (zero) memset(rv, 0, nSize); - return rv; - } - - void jpeg_decoder::word_clear(void *p, uint16 c, uint n) - { - uint8 *pD = (uint8*)p; - const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF; - while (n) - { - pD[0] = l; pD[1] = h; pD += 2; - n--; - } - } - - // Refill the input buffer. - // This method will sit in a loop until (A) the buffer is full or (B) - // the stream's read() method reports and end of file condition. - void jpeg_decoder::prep_in_buffer() - { - m_in_buf_left = 0; - m_pIn_buf_ofs = m_in_buf; - - if (m_eof_flag) - return; - - do - { - int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag); - if (bytes_read == -1) - stop_decoding(JPGD_STREAM_READ); - - m_in_buf_left += bytes_read; - } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag)); - - m_total_bytes_read += m_in_buf_left; - - // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid). - // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.) - word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64); - } - - // Read a Huffman code table. 
- void jpeg_decoder::read_dht_marker() - { - int i, index, count; - uint8 huff_num[17]; - uint8 huff_val[256]; - - uint num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= 2; - - while (num_left) - { - index = get_bits(8); - - huff_num[0] = 0; - - count = 0; - - for (i = 1; i <= 16; i++) - { - huff_num[i] = static_cast(get_bits(8)); - count += huff_num[i]; - } - - if (count > 255) - stop_decoding(JPGD_BAD_DHT_COUNTS); - - for (i = 0; i < count; i++) - huff_val[i] = static_cast(get_bits(8)); - - i = 1 + 16 + count; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= i; - - if ((index & 0x10) > 0x10) - stop_decoding(JPGD_BAD_DHT_INDEX); - - index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1); - - if (index >= JPGD_MAX_HUFF_TABLES) - stop_decoding(JPGD_BAD_DHT_INDEX); - - if (!m_huff_num[index]) - m_huff_num[index] = (uint8 *)alloc(17); - - if (!m_huff_val[index]) - m_huff_val[index] = (uint8 *)alloc(256); - - m_huff_ac[index] = (index & 0x10) != 0; - memcpy(m_huff_num[index], huff_num, 17); - memcpy(m_huff_val[index], huff_val, 256); - } - } - - // Read a quantization table. - void jpeg_decoder::read_dqt_marker() - { - int n, i, prec; - uint num_left; - uint temp; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DQT_MARKER); - - num_left -= 2; - - while (num_left) - { - n = get_bits(8); - prec = n >> 4; - n &= 0x0F; - - if (n >= JPGD_MAX_QUANT_TABLES) - stop_decoding(JPGD_BAD_DQT_TABLE); - - if (!m_quant[n]) - m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t)); - - // read quantization entries, in zag order - for (i = 0; i < 64; i++) - { - temp = get_bits(8); - - if (prec) - temp = (temp << 8) + get_bits(8); - - m_quant[n][i] = static_cast(temp); - } - - i = 64 + 1; - - if (prec) - i += 64; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DQT_LENGTH); - - num_left -= i; - } - } - - // Read the start of frame (SOF) marker. - void jpeg_decoder::read_sof_marker() - { - int i; - uint num_left; - - num_left = get_bits(16); - - if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */ - stop_decoding(JPGD_BAD_PRECISION); - - m_image_y_size = get_bits(16); - - if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT)) - stop_decoding(JPGD_BAD_HEIGHT); - - m_image_x_size = get_bits(16); - - if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH)) - stop_decoding(JPGD_BAD_WIDTH); - - m_comps_in_frame = get_bits(8); - - if (m_comps_in_frame > JPGD_MAX_COMPONENTS) - stop_decoding(JPGD_TOO_MANY_COMPONENTS); - - if (num_left != (uint)(m_comps_in_frame * 3 + 8)) - stop_decoding(JPGD_BAD_SOF_LENGTH); - - for (i = 0; i < m_comps_in_frame; i++) - { - m_comp_ident[i] = get_bits(8); - m_comp_h_samp[i] = get_bits(4); - m_comp_v_samp[i] = get_bits(4); - m_comp_quant[i] = get_bits(8); - } - } - - // Used to skip unrecognized markers. - void jpeg_decoder::skip_variable_marker() - { - uint num_left; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_VARIABLE_MARKER); - - num_left -= 2; - - while (num_left) - { - get_bits(8); - num_left--; - } - } - - // Read a define restart interval (DRI) marker. - void jpeg_decoder::read_dri_marker() - { - if (get_bits(16) != 4) - stop_decoding(JPGD_BAD_DRI_LENGTH); - - m_restart_interval = get_bits(16); - } - - // Read a start of scan (SOS) marker. 
- void jpeg_decoder::read_sos_marker() - { - uint num_left; - int i, ci, n, c, cc; - - num_left = get_bits(16); - - n = get_bits(8); - - m_comps_in_scan = n; - - num_left -= 3; - - if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) ) - stop_decoding(JPGD_BAD_SOS_LENGTH); - - for (i = 0; i < n; i++) - { - cc = get_bits(8); - c = get_bits(8); - num_left -= 2; - - for (ci = 0; ci < m_comps_in_frame; ci++) - if (cc == m_comp_ident[ci]) - break; - - if (ci >= m_comps_in_frame) - stop_decoding(JPGD_BAD_SOS_COMP_ID); - - m_comp_list[i] = ci; - m_comp_dc_tab[ci] = (c >> 4) & 15; - m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1); - } - - m_spectral_start = get_bits(8); - m_spectral_end = get_bits(8); - m_successive_high = get_bits(4); - m_successive_low = get_bits(4); - - if (!m_progressive_flag) - { - m_spectral_start = 0; - m_spectral_end = 63; - } - - num_left -= 3; - - while (num_left) /* read past whatever is num_left */ - { - get_bits(8); - num_left--; - } - } - - // Finds the next marker. - int jpeg_decoder::next_marker() - { - uint c, bytes; - - bytes = 0; - - do - { - do - { - bytes++; - c = get_bits(8); - } while (c != 0xFF); - - do - { - c = get_bits(8); - } while (c == 0xFF); - - } while (c == 0); - - // If bytes > 0 here, there where extra bytes before the marker (not good). - - return c; - } - - // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is - // encountered. - int jpeg_decoder::process_markers() - { - int c; - - for ( ; ; ) - { - c = next_marker(); - - switch (c) - { - case M_SOF0: - case M_SOF1: - case M_SOF2: - case M_SOF3: - case M_SOF5: - case M_SOF6: - case M_SOF7: - // case M_JPG: - case M_SOF9: - case M_SOF10: - case M_SOF11: - case M_SOF13: - case M_SOF14: - case M_SOF15: - case M_SOI: - case M_EOI: - case M_SOS: - { - return c; - } - case M_DHT: - { - read_dht_marker(); - break; - } - // No arithmitic support - dumb patents! - case M_DAC: - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - case M_DQT: - { - read_dqt_marker(); - break; - } - case M_DRI: - { - read_dri_marker(); - break; - } - //case M_APP0: /* no need to read the JFIF marker */ - - case M_JPG: - case M_RST0: /* no parameters */ - case M_RST1: - case M_RST2: - case M_RST3: - case M_RST4: - case M_RST5: - case M_RST6: - case M_RST7: - case M_TEM: - { - stop_decoding(JPGD_UNEXPECTED_MARKER); - break; - } - default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */ - { - skip_variable_marker(); - break; - } - } - } - } - - // Finds the start of image (SOI) marker. - // This code is rather defensive: it only checks the first 512 bytes to avoid - // false positives. - void jpeg_decoder::locate_soi_marker() - { - uint lastchar, thischar; - uint bytesleft; - - lastchar = get_bits(8); - - thischar = get_bits(8); - - /* ok if it's a normal JPEG file without a special header */ - - if ((lastchar == 0xFF) && (thischar == M_SOI)) - return; - - bytesleft = 4096; //512; - - for ( ; ; ) - { - if (--bytesleft == 0) - stop_decoding(JPGD_NOT_JPEG); - - lastchar = thischar; - - thischar = get_bits(8); - - if (lastchar == 0xFF) - { - if (thischar == M_SOI) - break; - else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end - stop_decoding(JPGD_NOT_JPEG); - } - } - - // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad. 
- thischar = (m_bit_buf >> 24) & 0xFF; - - if (thischar != 0xFF) - stop_decoding(JPGD_NOT_JPEG); - } - - // Find a start of frame (SOF) marker. - void jpeg_decoder::locate_sof_marker() - { - locate_soi_marker(); - - int c = process_markers(); - - switch (c) - { - case M_SOF2: - m_progressive_flag = JPGD_TRUE; - case M_SOF0: /* baseline DCT */ - case M_SOF1: /* extended sequential DCT */ - { - read_sof_marker(); - break; - } - case M_SOF9: /* Arithmitic coding */ - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - default: - { - stop_decoding(JPGD_UNSUPPORTED_MARKER); - break; - } - } - } - - // Find a start of scan (SOS) marker. - int jpeg_decoder::locate_sos_marker() - { - int c; - - c = process_markers(); - - if (c == M_EOI) - return JPGD_FALSE; - else if (c != M_SOS) - stop_decoding(JPGD_UNEXPECTED_MARKER); - - read_sos_marker(); - - return JPGD_TRUE; - } - - // Reset everything to default/uninitialized state. - void jpeg_decoder::init(jpeg_decoder_stream *pStream) - { - m_pMem_blocks = NULL; - m_error_code = JPGD_SUCCESS; - m_ready_flag = false; - m_image_x_size = m_image_y_size = 0; - m_pStream = pStream; - m_progressive_flag = JPGD_FALSE; - - memset(m_huff_ac, 0, sizeof(m_huff_ac)); - memset(m_huff_num, 0, sizeof(m_huff_num)); - memset(m_huff_val, 0, sizeof(m_huff_val)); - memset(m_quant, 0, sizeof(m_quant)); - - m_scan_type = 0; - m_comps_in_frame = 0; - - memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp)); - memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp)); - memset(m_comp_quant, 0, sizeof(m_comp_quant)); - memset(m_comp_ident, 0, sizeof(m_comp_ident)); - memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks)); - memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks)); - - m_comps_in_scan = 0; - memset(m_comp_list, 0, sizeof(m_comp_list)); - memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab)); - memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab)); - - m_spectral_start = 0; - m_spectral_end = 0; - m_successive_low = 0; - m_successive_high = 0; - m_max_mcu_x_size = 0; - m_max_mcu_y_size = 0; - m_blocks_per_mcu = 0; - m_max_blocks_per_row = 0; - m_mcus_per_row = 0; - m_mcus_per_col = 0; - m_expanded_blocks_per_component = 0; - m_expanded_blocks_per_mcu = 0; - m_expanded_blocks_per_row = 0; - m_freq_domain_chroma_upsample = false; - - memset(m_mcu_org, 0, sizeof(m_mcu_org)); - - m_total_lines_left = 0; - m_mcu_lines_left = 0; - m_real_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_pixel = 0; - - memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs)); - - memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs)); - memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs)); - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_eob_run = 0; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_pIn_buf_ofs = m_in_buf; - m_in_buf_left = 0; - m_eof_flag = false; - m_tem_flag = 0; - - memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start)); - memset(m_in_buf, 0, sizeof(m_in_buf)); - memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end)); - - m_restart_interval = 0; - m_restarts_left = 0; - m_next_restart_num = 0; - - m_max_mcus_per_row = 0; - m_max_blocks_per_mcu = 0; - m_max_mcus_per_col = 0; - - memset(m_last_dc_val, 0, sizeof(m_last_dc_val)); - m_pMCU_coefficients = NULL; - m_pSample_buf = NULL; - - m_total_bytes_read = 0; - - m_pScan_line_0 = NULL; - m_pScan_line_1 = NULL; - - // Ready the input buffer. - prep_in_buffer(); - - // Prime the bit buffer. 
- m_bits_left = 16; - m_bit_buf = 0; - - get_bits(16); - get_bits(16); - - for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++) - m_mcu_block_max_zag[i] = 64; - } - -#define SCALEBITS 16 -#define ONE_HALF ((int) 1 << (SCALEBITS-1)) -#define FIX(x) ((int) ((x) * (1L<> SCALEBITS; - m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS; - m_crg[i] = (-FIX(0.71414f)) * k; - m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF; - } - } - - // This method throws back into the stream any bytes that where read - // into the bit buffer during initial marker scanning. - void jpeg_decoder::fix_in_buffer() - { - // In case any 0xFF's where pulled into the buffer during marker scanning. - JPGD_ASSERT((m_bits_left & 7) == 0); - - if (m_bits_left == 16) - stuff_char( (uint8)(m_bit_buf & 0xFF)); - - if (m_bits_left >= 8) - stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF)); - - stuff_char((uint8)((m_bit_buf >> 16) & 0xFF)); - stuff_char((uint8)((m_bit_buf >> 24) & 0xFF)); - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - void jpeg_decoder::transform_mcu(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64; - - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - } - - static const uint8 s_max_rc[64] = - { - 17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86, - 102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136 - }; - - void jpeg_decoder::transform_mcu_expand(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64; - - // Y IDCT - int mcu_block; - for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - - // Chroma IDCT, with upsampling - jpgd_block_t temp_block[64]; - - for (int i = 0; i < 2; i++) - { - DCT_Upsample::Matrix44 P, Q, R, S; - - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1); - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64); - - switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1]) - { - case 1*16+1: - DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr); - break; - case 1*16+2: - DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr); - break; - case 2*16+2: - DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+2: - DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+3: - DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr); - break; - case 3*16+4: - DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr); - break; - case 4*16+4: - DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+4: - DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+5: - DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr); - break; - 
case 5*16+6: - DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr); - break; - case 6*16+6: - DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+6: - DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+7: - DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr); - break; - case 7*16+8: - DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr); - break; - case 8*16+8: - DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr); - break; - default: - JPGD_ASSERT(false); - } - - DCT_Upsample::Matrix44 a(P + Q); P -= Q; - DCT_Upsample::Matrix44& b = P; - DCT_Upsample::Matrix44 c(R + S); R -= S; - DCT_Upsample::Matrix44& d = R; - - DCT_Upsample::Matrix44::add_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::add_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - pSrc_ptr += 64; - } - } - - // Loads and dequantizes the next row of (already decoded) coefficients. - // Progressive images only. - void jpeg_decoder::load_next_row() - { - int i; - jpgd_block_t *p; - jpgd_quant_t *q; - int mcu_row, mcu_block, row_block = 0; - int component_num, component_id; - int block_x_mcu[JPGD_MAX_COMPONENTS]; - - memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - q = m_quant[m_comp_quant[component_id]]; - - p = m_pMCU_coefficients + 64 * mcu_block; - - jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - p[0] = pDC[0]; - memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t)); - - for (i = 63; i > 0; i--) - if (p[g_ZAG[i]]) - break; - - m_mcu_block_max_zag[mcu_block] = i + 1; - - for ( ; i >= 0; i--) - if (p[g_ZAG[i]]) - p[g_ZAG[i]] = static_cast(p[g_ZAG[i]] * q[i]); - - row_block++; - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - - // Restart interval processing. 
- void jpeg_decoder::process_restart() - { - int i; - int c = 0; - - // Align to a byte boundry - // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers! - //get_bits_no_markers(m_bits_left & 7); - - // Let's scan a little bit to find the marker, but not _too_ far. - // 1536 is a "fudge factor" that determines how much to scan. - for (i = 1536; i > 0; i--) - if (get_char() == 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - for ( ; i > 0; i--) - if ((c = get_char()) != 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Is it the expected marker? If not, something bad happened. - if (c != (m_next_restart_num + M_RST0)) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Reset each component's DC prediction values. - memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - m_restarts_left = m_restart_interval; - - m_next_restart_num = (m_next_restart_num + 1) & 7; - - // Get the bit buffer going again... - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - static inline int dequantize_ac(int c, int q) { c *= q; return c; } - - // Decodes and dequantizes the next row of coefficients. - void jpeg_decoder::decode_next_row() - { - int row_block = 0; - - for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - jpgd_block_t* p = m_pMCU_coefficients; - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64) - { - int component_id = m_mcu_org[mcu_block]; - jpgd_quant_t* q = m_quant[m_comp_quant[component_id]]; - - int r, s; - s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r); - s = HUFF_EXTEND(r, s); - - m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]); - - p[0] = static_cast(s * q[0]); - - int prev_num_set = m_mcu_block_max_zag[mcu_block]; - - huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]]; - - int k; - for (k = 1; k < 64; k++) - { - int extra_bits; - s = huff_decode(pH, extra_bits); - - r = s >> 4; - s &= 15; - - if (s) - { - if (r) - { - if ((k + r) > 63) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(r, prev_num_set - k); - int kt = k; - while (n--) - p[g_ZAG[kt++]] = 0; - } - - k += r; - } - - s = HUFF_EXTEND(extra_bits, s); - - JPGD_ASSERT(k < 64); - - p[g_ZAG[k]] = static_cast(dequantize_ac(s, q[k])); //s * q[k]; - } - else - { - if (r == 15) - { - if ((k + 16) > 64) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(16, prev_num_set - k); - int kt = k; - while (n--) - { - JPGD_ASSERT(kt <= 63); - p[g_ZAG[kt++]] = 0; - } - } - - k += 16 - 1; // - 1 because the loop counter is k - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0); - // END EPIC MOD - } - else - break; - } - } - - if (k < prev_num_set) - { - int kt = k; - while (kt < prev_num_set) - p[g_ZAG[kt++]] = 0; - } - - m_mcu_block_max_zag[mcu_block] = k; - - row_block++; - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - - m_restarts_left--; - } - } - - // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB - void jpeg_decoder::H1V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int y = s[j]; - int cb = s[64+j]; - int cr = s[128+j]; - - if (jpg_format == 
ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - d += 4; - } - - s += 64*3; - } - } - - // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H2V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *y = m_pSample_buf + row * 8; - uint8 *c = m_pSample_buf + 2*64 + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 4; j++) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j<<1]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - } - - d0 += 8; - - c++; - } - y += 64; - } - - y += 64*4 - 64*2; - c += 64*4 - 8; - } - } - - // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H1V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*1 + (row & 7) * 8; - - c = m_pSample_buf + 64*2 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int cb = c[0+j]; - int cr = c[64+j]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - } - - d0 += 4; - d1 += 4; - } - - y += 64*4; - c += 64*4; - } - } - - // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB - void jpeg_decoder::H2V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*2 + (row & 7) * 8; - - c = m_pSample_buf + 64*4 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 8; j += 2) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+bc); - d1[5] = 
clamp(yy+gc); - d1[6] = clamp(yy+rc); - d1[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+rc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+bc); - d1[7] = 255; - } - - d0 += 8; - d1 += 8; - - c++; - } - y += 64; - } - - y += 64*6 - 64*2; - c += 64*6 - 8; - } - } - - // Y (1 block per MCU) to 8-bit grayscale - void jpeg_decoder::gray_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - *(uint *)d = *(uint *)s; - *(uint *)(&d[4]) = *(uint *)(&s[4]); - - s += 64; - d += 8; - } - } - - void jpeg_decoder::expanded_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - - uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8; - - uint8* d = m_pScan_line_0; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int k = 0; k < m_max_mcu_x_size; k += 8) - { - const int Y_ofs = k * 8; - const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component; - const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2; - for (int j = 0; j < 8; j++) - { - int y = Py[Y_ofs + j]; - int cb = Py[Cb_ofs + j]; - int cr = Py[Cr_ofs + j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - - d += 4; - } - } - - Py += 64 * m_expanded_blocks_per_mcu; - } - } - - // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream. - void jpeg_decoder::find_eoi() - { - if (!m_progressive_flag) - { - // Attempt to read the EOI marker. - //get_bits_no_markers(m_bits_left & 7); - - // Prime the bit buffer - m_bits_left = 16; - get_bits(16); - get_bits(16); - - // The next marker _should_ be EOI - process_markers(); - } - - m_total_bytes_read -= m_in_buf_left; - } - - int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len) - { - if ((m_error_code) || (!m_ready_flag)) - return JPGD_FAILED; - - if (m_total_lines_left == 0) - return JPGD_DONE; - - if (m_mcu_lines_left == 0) - { - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - if (m_progressive_flag) - load_next_row(); - else - decode_next_row(); - - // Find the EOI marker if that was the last row. 
- if (m_total_lines_left <= m_max_mcu_y_size) - find_eoi(); - - m_mcu_lines_left = m_max_mcu_y_size; - } - - if (m_freq_domain_chroma_upsample) - { - expanded_convert(); - *pScan_line = m_pScan_line_0; - } - else - { - switch (m_scan_type) - { - case JPGD_YH2V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H2V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH2V1: - { - H2V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_YH1V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H1V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH1V1: - { - H1V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_GRAYSCALE: - { - gray_convert(); - *pScan_line = m_pScan_line_0; - - break; - } - } - } - - *pScan_line_len = m_real_dest_bytes_per_scan_line; - - m_mcu_lines_left--; - m_total_lines_left--; - - return JPGD_SUCCESS; - } - - // Creates the tables needed for efficient Huffman decoding. - void jpeg_decoder::make_huff_table(int index, huff_tables *pH) - { - int p, i, l, si; - uint8 huffsize[257]; - uint huffcode[257]; - uint code; - uint subtree; - int code_size; - int lastp; - int nextfreeentry; - int currententry; - - pH->ac_table = m_huff_ac[index] != 0; - - p = 0; - - for (l = 1; l <= 16; l++) - { - for (i = 1; i <= m_huff_num[index][l]; i++) - huffsize[p++] = static_cast(l); - } - - huffsize[p] = 0; - - lastp = p; - - code = 0; - si = huffsize[0]; - p = 0; - - while (huffsize[p]) - { - while (huffsize[p] == si) - { - huffcode[p++] = code; - code++; - } - - code <<= 1; - si++; - } - - memset(pH->look_up, 0, sizeof(pH->look_up)); - memset(pH->look_up2, 0, sizeof(pH->look_up2)); - memset(pH->tree, 0, sizeof(pH->tree)); - memset(pH->code_size, 0, sizeof(pH->code_size)); - - nextfreeentry = -1; - - p = 0; - - while (p < lastp) - { - i = m_huff_val[index][p]; - code = huffcode[p]; - code_size = huffsize[p]; - - pH->code_size[i] = static_cast(code_size); - - if (code_size <= 8) - { - code <<= (8 - code_size); - - for (l = 1 << (8 - code_size); l > 0; l--) - { - JPGD_ASSERT(i < 256); - - pH->look_up[code] = i; - - bool has_extrabits = false; - int extra_bits = 0; - int num_extra_bits = i & 15; - - int bits_to_fetch = code_size; - if (num_extra_bits) - { - int total_codesize = code_size + num_extra_bits; - if (total_codesize <= 8) - { - has_extrabits = true; - extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize)); - JPGD_ASSERT(extra_bits <= 0x7FFF); - bits_to_fetch += num_extra_bits; - } - } - - if (!has_extrabits) - pH->look_up2[code] = i | (bits_to_fetch << 8); - else - pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8); - - code++; - } - } - else - { - subtree = (code >> (code_size - 8)) & 0xFF; - - currententry = pH->look_up[subtree]; - - if (currententry == 0) - { - pH->look_up[subtree] = currententry = nextfreeentry; - pH->look_up2[subtree] = currententry = nextfreeentry; - - nextfreeentry -= 2; - } - - code <<= (16 - (code_size - 8)); - - for (l = code_size; l > 9; l--) - { - if ((code & 0x8000) == 0) - currententry--; - - if (pH->tree[-currententry - 1] == 0) - { - pH->tree[-currententry - 1] = nextfreeentry; - - currententry = nextfreeentry; - - nextfreeentry -= 2; - } - else - currententry = pH->tree[-currententry - 1]; - - code <<= 1; - } - - if ((code & 0x8000) == 0) - currententry--; - - pH->tree[-currententry - 1] = i; - } - - p++; - } - } - - // Verifies the quantization tables needed for 
this scan are available. - void jpeg_decoder::check_quant_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL) - stop_decoding(JPGD_UNDEFINED_QUANT_TABLE); - } - - // Verifies that all the Huffman tables needed for this scan are available. - void jpeg_decoder::check_huff_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - { - if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - - if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - } - - for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++) - if (m_huff_num[i]) - { - if (!m_pHuff_tabs[i]) - m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables)); - - make_huff_table(i, m_pHuff_tabs[i]); - } - } - - // Determines the component order inside each MCU. - // Also calcs how many MCU's are on each row, etc. - void jpeg_decoder::calc_mcu_block_order() - { - int component_num, component_id; - int max_h_samp = 0, max_v_samp = 0; - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - if (m_comp_h_samp[component_id] > max_h_samp) - max_h_samp = m_comp_h_samp[component_id]; - - if (m_comp_v_samp[component_id] > max_v_samp) - max_v_samp = m_comp_v_samp[component_id]; - } - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8; - m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8; - } - - if (m_comps_in_scan == 1) - { - m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]]; - m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]]; - } - else - { - m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp; - m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp; - } - - if (m_comps_in_scan == 1) - { - m_mcu_org[0] = m_comp_list[0]; - - m_blocks_per_mcu = 1; - } - else - { - m_blocks_per_mcu = 0; - - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - int num_blocks; - - component_id = m_comp_list[component_num]; - - num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id]; - - while (num_blocks--) - m_mcu_org[m_blocks_per_mcu++] = component_id; - } - } - } - - // Starts a new scan. - int jpeg_decoder::init_scan() - { - if (!locate_sos_marker()) - return JPGD_FALSE; - - calc_mcu_block_order(); - - check_huff_tables(); - - check_quant_tables(); - - memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - if (m_restart_interval) - { - m_restarts_left = m_restart_interval; - m_next_restart_num = 0; - } - - fix_in_buffer(); - - return JPGD_TRUE; - } - - // Starts a frame. Determines if the number of components or sampling factors - // are supported. 
- void jpeg_decoder::init_frame() - { - int i; - - if (m_comps_in_frame == 1) - { - if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1)) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - m_scan_type = JPGD_GRAYSCALE; - m_max_blocks_per_mcu = 1; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if (m_comps_in_frame == 3) - { - if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) || - ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) ) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH1V1; - - m_max_blocks_per_mcu = 3; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH2V1; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH1V2; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 16; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH2V2; - m_max_blocks_per_mcu = 6; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 16; - } - else - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - } - else - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size; - m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size; - - // These values are for the *destination* pixels: after conversion. - if (m_scan_type == JPGD_GRAYSCALE) - m_dest_bytes_per_pixel = 1; - else - m_dest_bytes_per_pixel = 4; - - m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel; - - m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel); - - // Initialize two scan line buffers. - m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2)) - m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - - m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu; - - // Should never happen - if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW) - stop_decoding(JPGD_ASSERTION_ERROR); - - // Allocate the coefficient buffer, enough for one MCU - m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t)); - - for (i = 0; i < m_max_blocks_per_mcu; i++) - m_mcu_block_max_zag[i] = 64; - - m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0]; - m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame; - m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu; - // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor. -// BEGIN EPIC MOD -#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING - m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3); -#else - m_freq_domain_chroma_upsample = 0; -#endif -// END EPIC MOD - - if (m_freq_domain_chroma_upsample) - m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64); - else - m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64); - - m_total_lines_left = m_image_y_size; - - m_mcu_lines_left = 0; - - create_look_ups(); - } - - // The coeff_buf series of methods originally stored the coefficients - // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache - // was used to make this process more efficient. Now, we can store the entire - // thing in RAM. 
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y) - { - coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf)); - - cb->block_num_x = block_num_x; - cb->block_num_y = block_num_y; - cb->block_len_x = block_len_x; - cb->block_len_y = block_len_y; - cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t); - cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true); - return cb; - } - - inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y) - { - JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y)); - return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x)); - } - - // The following methods decode the various types of m_blocks encountered - // in progressively encoded images. - void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, r; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0) - { - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - } - - pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]); - - p[0] = static_cast(s << pD->m_successive_low); - } - - void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - if (pD->get_bits_no_markers(1)) - { - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - p[0] |= (1 << pD->m_successive_low); - } - } - - void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int k, s, r; - - if (pD->m_eob_run) - { - pD->m_eob_run--; - return; - } - - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if ((k += r) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - - p[g_ZAG[k]] = static_cast(s << pD->m_successive_low); - } - else - { - if (r == 15) - { - if ((k += 15) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - } - else - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - pD->m_eob_run--; - - break; - } - } - } - } - - void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, k, r; - int p1 = 1 << pD->m_successive_low; - int m1 = (-1) << pD->m_successive_low; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - k = pD->m_spectral_start; - - if (pD->m_eob_run == 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if (s != 1) - pD->stop_decoding(JPGD_DECODE_ERROR); - - if (pD->get_bits_no_markers(1)) - s = p1; - else - s = m1; - } - else - { - if (r != 15) - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - break; - } - } - - do - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if 
(*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - else - { - if (--r < 0) - break; - } - - k++; - - } while (k <= pD->m_spectral_end); - - if ((s) && (k < 64)) - { - p[g_ZAG[k]] = static_cast(s); - } - } - } - - if (pD->m_eob_run > 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - } - - pD->m_eob_run--; - } - } - - // Decode a scan in a progressively encoded image. - void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func) - { - int mcu_row, mcu_col, mcu_block; - int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS]; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++) - { - int component_num, component_id; - - memset(block_x_mcu, 0, sizeof(block_x_mcu)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - - decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - m_restarts_left--; - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - } - - // Decode a progressively encoded image. - void jpeg_decoder::init_progressive() - { - int i; - - if (m_comps_in_frame == 4) - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - // Allocate the coefficient buffers. 
- for (i = 0; i < m_comps_in_frame; i++) - { - m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1); - m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8); - } - - for ( ; ; ) - { - int dc_only_scan, refinement_scan; - pDecode_block_func decode_block_func; - - if (!init_scan()) - break; - - dc_only_scan = (m_spectral_start == 0); - refinement_scan = (m_successive_high != 0); - - if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63)) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if (dc_only_scan) - { - if (m_spectral_end) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - } - else if (m_comps_in_scan != 1) /* AC scans can only contain one component */ - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if ((refinement_scan) && (m_successive_low != m_successive_high - 1)) - stop_decoding(JPGD_BAD_SOS_SUCCESSIVE); - - if (dc_only_scan) - { - if (refinement_scan) - decode_block_func = decode_block_dc_refine; - else - decode_block_func = decode_block_dc_first; - } - else - { - if (refinement_scan) - decode_block_func = decode_block_ac_refine; - else - decode_block_func = decode_block_ac_first; - } - - decode_scan(decode_block_func); - - m_bits_left = 16; - get_bits(16); - get_bits(16); - } - - m_comps_in_scan = m_comps_in_frame; - - for (i = 0; i < m_comps_in_frame; i++) - m_comp_list[i] = i; - - calc_mcu_block_order(); - } - - void jpeg_decoder::init_sequential() - { - if (!init_scan()) - stop_decoding(JPGD_UNEXPECTED_MARKER); - } - - void jpeg_decoder::decode_start() - { - init_frame(); - - if (m_progressive_flag) - init_progressive(); - else - init_sequential(); - } - - void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream) - { - init(pStream); - locate_sof_marker(); - } - - jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream) - { - if (setjmp(m_jmp_state)) - return; - decode_init(pStream); - } - - int jpeg_decoder::begin_decoding() - { - if (m_ready_flag) - return JPGD_SUCCESS; - - if (m_error_code) - return JPGD_FAILED; - - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - decode_start(); - - m_ready_flag = true; - - return JPGD_SUCCESS; - } - - jpeg_decoder::~jpeg_decoder() - { - free_all_blocks(); - } - - jpeg_decoder_file_stream::jpeg_decoder_file_stream() - { - m_pFile = NULL; - m_eof_flag = false; - m_error_flag = false; - } - - void jpeg_decoder_file_stream::close() - { - if (m_pFile) - { - fclose(m_pFile); - m_pFile = NULL; - } - - m_eof_flag = false; - m_error_flag = false; - } - - jpeg_decoder_file_stream::~jpeg_decoder_file_stream() - { - close(); - } - - bool jpeg_decoder_file_stream::open(const char *Pfilename) - { - close(); - - m_eof_flag = false; - m_error_flag = false; - -#if defined(_MSC_VER) - m_pFile = NULL; - fopen_s(&m_pFile, Pfilename, "rb"); -#else - m_pFile = fopen(Pfilename, "rb"); -#endif - return m_pFile != NULL; - } - - int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - if (!m_pFile) - return -1; - - if (m_eof_flag) - { - *pEOF_flag = true; - return 0; - } - - if (m_error_flag) - return -1; - - int bytes_read = static_cast(fread(pBuf, 1, max_bytes_to_read, m_pFile)); - if (bytes_read < max_bytes_to_read) - { - if (ferror(m_pFile)) - { - m_error_flag = true; - return -1; - } - - m_eof_flag = true; - *pEOF_flag = true; - } - - return bytes_read; - } - - bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size) - { - close(); - m_pSrc_data = pSrc_data; - m_ofs = 0; - m_size = size; - 
return true; - } - - int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - *pEOF_flag = false; - - if (!m_pSrc_data) - return -1; - - uint bytes_remaining = m_size - m_ofs; - if ((uint)max_bytes_to_read > bytes_remaining) - { - max_bytes_to_read = bytes_remaining; - *pEOF_flag = true; - } - - memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read); - m_ofs += max_bytes_to_read; - - return max_bytes_to_read; - } - - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps) - { - if (!actual_comps) - return NULL; - *actual_comps = 0; - - if ((!pStream) || (!width) || (!height) || (!req_comps)) - return NULL; - - if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4)) - return NULL; - - jpeg_decoder decoder(pStream); - if (decoder.get_error_code() != JPGD_SUCCESS) - return NULL; - - const int image_width = decoder.get_width(), image_height = decoder.get_height(); - *width = image_width; - *height = image_height; - *actual_comps = decoder.get_num_components(); - - if (decoder.begin_decoding() != JPGD_SUCCESS) - return NULL; - - const int dst_bpl = image_width * req_comps; - - uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height); - if (!pImage_data) - return NULL; - - for (int y = 0; y < image_height; y++) - { - const uint8* pScan_line = 0; - uint scan_line_len; - if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS) - { - jpgd_free(pImage_data); - return NULL; - } - - uint8 *pDst = pImage_data + y * dst_bpl; - - if (((req_comps == 4) && (decoder.get_num_components() == 3)) || - ((req_comps == 1) && (decoder.get_num_components() == 1))) - { - memcpy(pDst, pScan_line, dst_bpl); - } - else if (decoder.get_num_components() == 1) - { - if (req_comps == 3) - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst += 3; - } - } - else - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst[3] = 255; - pDst += 4; - } - } - } - else if (decoder.get_num_components() == 3) - { - if (req_comps == 1) - { - const int YR = 19595, YG = 38470, YB = 7471; - for (int x = 0; x < image_width; x++) - { - int r = pScan_line[x*4+0]; - int g = pScan_line[x*4+1]; - int b = pScan_line[x*4+2]; - *pDst++ = static_cast((r * YR + g * YG + b * YB + 32768) >> 16); - } - } - else - { - for (int x = 0; x < image_width; x++) - { - pDst[0] = pScan_line[x*4+0]; - pDst[1] = pScan_line[x*4+1]; - pDst[2] = pScan_line[x*4+2]; - pDst += 3; - } - } - } - } - - return pImage_data; - } - -// BEGIN EPIC MOD - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format) - { - jpg_format = (ERGBFormatJPG)format; -// EMD EPIC MOD - jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size); - return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps); - } - - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps) - { - jpgd::jpeg_decoder_file_stream file_stream; - if (!file_stream.open(pSrc_filename)) - return NULL; - return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps); - } - -} // namespace jpgd diff --git a/spaces/ramiin2/AutoGPT/autogpt/spinner.py 
b/spaces/ramiin2/AutoGPT/autogpt/spinner.py deleted file mode 100644 index 4e33d74213881352546f334ccb1eb4772b8b7b70..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/autogpt/spinner.py +++ /dev/null @@ -1,65 +0,0 @@ -"""A simple spinner module""" -import itertools -import sys -import threading -import time - - -class Spinner: - """A simple spinner class""" - - def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None: - """Initialize the spinner class - - Args: - message (str): The message to display. - delay (float): The delay between each spinner update. - """ - self.spinner = itertools.cycle(["-", "/", "|", "\\"]) - self.delay = delay - self.message = message - self.running = False - self.spinner_thread = None - - def spin(self) -> None: - """Spin the spinner""" - while self.running: - sys.stdout.write(f"{next(self.spinner)} {self.message}\r") - sys.stdout.flush() - time.sleep(self.delay) - sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") - - def __enter__(self): - """Start the spinner""" - self.running = True - self.spinner_thread = threading.Thread(target=self.spin) - self.spinner_thread.start() - - return self - - def __exit__(self, exc_type, exc_value, exc_traceback) -> None: - """Stop the spinner - - Args: - exc_type (Exception): The exception type. - exc_value (Exception): The exception value. - exc_traceback (Exception): The exception traceback. - """ - self.running = False - if self.spinner_thread is not None: - self.spinner_thread.join() - sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") - sys.stdout.flush() - - def update_message(self, new_message, delay=0.1): - """Update the spinner message - Args: - new_message (str): New message to display - delay: Delay in seconds before updating the message - """ - time.sleep(delay) - sys.stdout.write( - f"\r{' ' * (len(self.message) + 2)}\r" - ) # Clear the current message - sys.stdout.flush() - self.message = new_message diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Corel Video Studio Templates Zip.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Corel Video Studio Templates Zip.md deleted file mode 100644 index 98d48b760eac9010b75f725160313f610f1d8ba7..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Corel Video Studio Templates Zip.md +++ /dev/null @@ -1,7 +0,0 @@ - -

    CorelDRAW is software built for graphic design, and we know that many of you use it too. All your designs, such as logos, typography, and illustrations, are saved with the .cdr extension. Sometimes, however, a CorelDRAW file can become corrupted. Trying to open it can be a time-consuming task, especially when the file was never saved and the program hangs. If you want to repair or recover a CorelDRAW file, you are in the right place. Here we share some possible solutions that show you how to recover corrupted CorelDRAW X7 files with a few simple methods.

    -

    corel video studio templates zip


    Download File ->>> https://urlgoal.com/2uCMog



    -

    The CyberLink DVD Restoration Suite is free DVD repair and backup software. To use the DVD repair tool, you need to burn a file to a disc and then load the disc into the DVD Restoration Suite. To back up your media, you need at least one DVD-R disc. You can create a backup by plugging your DVD drive into your computer while the DVD-R disc is inserted in the drive; the software will detect the disc and build a backup file. The software can be found at http://www.coreldraw.com/discrepair.aspx.

    -

    The Project Studio and Post-Production Studio editing programs are geared toward video creation, and you can use them in tandem. I tested the program and found it relatively easy to use. Files and folders are mixed on-screen, and the default workspace shows a Level view of the material. You can customize everything by changing the workspace mode, and Project Studio includes tools for trimming, effects, color correction, transitions, and stabilization.

    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack Photograv 3 1 72 !FREE!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack Photograv 3 1 72 !FREE!.md deleted file mode 100644 index dd4924a6ceaba54ad718e497105cc049a940ad45..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack Photograv 3 1 72 !FREE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Crack Photograv 3 1 72


    DOWNLOAD 🆗 https://urlgoal.com/2uCJrc



    -
    -
    -
    -
    -

    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ea Games Keygen Fff Free 56.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ea Games Keygen Fff Free 56.md deleted file mode 100644 index 06ad4065cc99ad8a4af6ee1b34025051d9c544ac..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ea Games Keygen Fff Free 56.md +++ /dev/null @@ -1,31 +0,0 @@ - -

    How to Get EA Games Keygen FFF Free 56

    -

    If you are a fan of EA games, you might be looking for a way to activate them for free. You might have heard of EA games keygen FFF free 56, a tool that can generate product keys for over 200 EA games. But what is it and how does it work? In this article, we will explain everything you need to know about EA games keygen FFF free 56 and how to use it safely and effectively.

    -

    ea games keygen fff free 56


    Download Filehttps://urlgoal.com/2uCL1D



    -

    What is EA Games Keygen FFF Free 56?

    -

    EA games keygen FFF free 56 is a software program created by FFF team, a group of hackers and crackers who specialize in cracking games and software. The program can scan the registry of your computer and find the installed EA games. Then, it can generate a unique product key for each game, which you can use to activate them without paying anything.

    -

    The program supports almost all EA games, including popular titles like The Sims 3, FIFA, Need for Speed, Battlefield, Mass Effect, Dragon Age, and more. It also works with older games that require online activation. The program is updated regularly to include new games and fix any bugs or errors.

    -

    How to Download and Use EA Games Keygen FFF Free 56?

    -

    To download EA games keygen FFF free 56, you need to find a reliable source that offers the clean and working version of the program. There are many websites that claim to offer the program, but some of them might contain viruses, malware, or fake files that can harm your computer or steal your personal information.

    -

    -

    One of the websites that we recommend is jemi.so, a modern website builder for creatives, entrepreneurs, and dreamers. You can build a beautiful link-in-bio site, portfolio, or landing page in minutes with jemi.so. You can also monetize your site with blogging, donations, subscriptions, and more. On jemi.so, you can find the link to download EA games keygen FFF free 56 safely and quickly.

    -

    To use EA games keygen FFF free 56, you need to follow these simple steps:

    -
      -
    1. Download the program from jemi.so and extract it to a folder on your computer.
    2. Run the program as administrator and wait for it to load.
    3. Select the game that you want to activate from the list and click on Generate.
    4. Copy the product key that appears on the screen and paste it into the game activation window.
    5. Enjoy your game for free!
    -

    Is EA Games Keygen FFF Free 56 Safe and Legal?

    -

    EA games keygen FFF free 56 is a tool that can help you save money and play your favorite EA games without any restrictions. However, it is not a legal or authorized way to activate your games. It is a form of piracy that violates the terms and conditions of EA and the game developers. Using this tool can expose you to legal risks and penalties if you are caught by EA or other authorities.

    -

    Moreover, EA games keygen FFF free 56 is not a safe tool to use on your computer. It can contain viruses, malware, or spyware that can damage your system or compromise your security. It can also cause errors or crashes in your games or prevent you from accessing online features or updates. It can also be detected by anti-virus software or anti-cheat systems and result in bans or suspensions from your games or accounts.

    -

    Therefore, we do not recommend using EA games keygen FFF free 56 or any other similar tools to activate your games. It is better to buy your games legally from official sources like Steam, Origin, or other platforms. This way, you can support the game developers and enjoy your games with full features and quality.

    -

    Conclusion

    -

    EA games keygen FFF free 56 is a software program that can generate product keys for over 200 EA games for free. However, it is not a legal or safe way to activate your games. It can cause legal problems, security issues, or game errors for you. Therefore, we advise you to avoid using this tool and buy your games legally from official sources instead.

    -

    We hope this article has helped you understand what EA games keygen FFF free 56 is and how to use it properly. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Football Manager Handheld 2013 EUR PSP ISO CSO Download.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Football Manager Handheld 2013 EUR PSP ISO CSO Download.md deleted file mode 100644 index e6200dbff5a0c15224abe51d5367d00e1581b51d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Football Manager Handheld 2013 EUR PSP ISO CSO Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Football Manager Handheld 2013 EUR PSP ISO CSO Download


    DOWNLOAD https://urlgoal.com/2uCLqI



    -
    -
    -
    -

    diff --git a/spaces/rinong/StyleGAN-NADA/op/upfirdn2d_cpu.py b/spaces/rinong/StyleGAN-NADA/op/upfirdn2d_cpu.py deleted file mode 100644 index a0f820b4c81e03598589b1ea6b95cf9bef9b04f8..0000000000000000000000000000000000000000 --- a/spaces/rinong/StyleGAN-NADA/op/upfirdn2d_cpu.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.autograd import Function -from torch.nn import functional as F - - - -module_path = os.path.dirname(__file__) - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/rorallitri/biomedical-language-models/logs/Boukalates Algeroises En Arabe Pdf Download !FREE!.md b/spaces/rorallitri/biomedical-language-models/logs/Boukalates Algeroises En Arabe Pdf Download !FREE!.md deleted file mode 100644 index eaa34c3efeb240587e73c66c8920b9c522e8ad20..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Boukalates Algeroises En Arabe Pdf Download !FREE!.md +++ /dev/null @@ -1,22 +0,0 @@ -

    boukalates algeroises en arabe pdf download


    Download File » https://tinurll.com/2uzor5



    -
    -
    -

    diff --git a/spaces/rorallitri/biomedical-language-models/logs/Fate stay night anime torrent How to get the highest quality and fastest downloads.md b/spaces/rorallitri/biomedical-language-models/logs/Fate stay night anime torrent How to get the highest quality and fastest downloads.md deleted file mode 100644 index 175a850fd2907cfbf0427b97f6287d01da547661..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Fate stay night anime torrent How to get the highest quality and fastest downloads.md +++ /dev/null @@ -1,21 +0,0 @@ - -

    This torrent site has been around for quite a while and has a rich collection of titles. It has a crisp user interface that allows you to search for your favorite anime easily or browse from the anime icons.

    -

    Fate stay night anime torrent


    Download Zip 🗹 https://tinurll.com/2uzokk



    -

    AniArena screams anime with its background of colorful Japanese cartoon characters. It is a torrent tracker site that attracts fans around the world. You can start downloading without signing up for an account.

    -

    At a glance, Project GXS is similar to one of the many fan-created blogs on anime. But clicking on the index brings up a mammoth list of all the titles on the site. Some of the titles offer direct downloads apart from torrenting.

    -

    Most countries have strict copyright laws. Although legal actions against torrent downloaders are relatively rare, they do happen. In Japan, a 39-year-old man was arrested for downloading anime and other files over P2P software.

    -

    While streaming services have gone mainstream, the torrenting community is still active. However, not all P2P communities are great for anime torrents. Anime fans tend to visit specific torrent sites that cater to genre lovers.

    -

    Some of these anime torrenting sites also provide filters that return torrent results for anime released within specific date ranges. Your chances of finding old titles on torrent sites are higher than on mainstream streaming providers.

    -

    Of course, the best torrent sites should have a rich collection of animes across all genres. The number of users downloading or sharing the file is also important as torrenting speed increases when more users are sharing the same file.

    -

    -

    If you prefer plain and simple torrent sites, the best option is Anime Tosho. It has an updated list of newly uploaded torrents. No fancy backgrounds but purely a large collection of torrents for anime fans.

    -

    Fate/stay night is a Japanese visual novel developed by Type-Moon and originally released as an adult game for Windows on January 30, 2004. A version of Fate/stay night rated for ages 15 and up titled Fate/stay night Réalta Nua (Irish for "new star"), which features the Japanese voice actors from the anime series, was released in 2007 for the PlayStation 2 and later for download on Windows as a trilogy covering the three main storylines: Fate, Unlimited Blade Works and Heaven's Feel. Réalta Nua was also ported to the PlayStation Vita, iOS and Android. The plot focuses on a young mage named Shirou Emiya who becomes a warrior in a battle between "Servants" known as the Holy Grail War. Shirou bonds with a heroine through each route and confronts different adversaries participating in the war.

    -

    A manga series adaptation by Datto Nishiwaki has serialized in Kadokawa Shoten's Shōnen Ace magazine between the February 2006 and December 2012 issues. A 24-episode anime series created by Studio Deen aired in Japan between January and June 2006. Both mostly follow the Fate route but add events from other storylines. A film adaptation, Fate/stay night: Unlimited Blade Works, also by Studio Deen, was released in Japanese theaters on January 23, 2010. A second anime television series, Fate/stay night: Unlimited Blade Works, was produced by Ufotable and aired between October 2014 and June 2015, following the game's second route as opposed to the first. A second manga adaptation by Taskohna began in 2015 in Kadokawa Shoten's Young Ace, focusing solely on the third route. A film trilogy adapted Heaven's Feel route of the visual novel, with the first film, titled presage flower, released in 2017, the second film, titled lost butterfly, released in 2019, and the final film, titled spring song, released in 2020. A third manga adaptation by Daisuke Moriyama began in 2021 in ASCII Media Works's Dengeki Daioh, focusing solely on the second route.

    -

    Fate/stay night spawned the Fate media franchise, consisting of a number of adaptations and spin-offs in various media. On October 28, 2005, Type-Moon released a sequel to Fate/stay night, titled Fate/hollow ataraxia. Its plot is set half a year after the events of Fate/stay night. A light novel series titled Fate/Zero, set as a prequel to Fate/stay night, was published from 2006 to 2007, with an anime adaptation by Ufotable airing between October 2011 and June 2012. A spin-off magical girl manga series, Fate/kaleid liner Prisma Illya, began serialization in 2007 and has received multiple anime television series. Three fighting games have been released: Fate/unlimited codes for arcades and PlayStation 2, Fate/tiger colosseum and its sequel Fate/tiger colosseum Upper for PSP. A PSP RPG titled Fate/Extra was released on July 22, 2010, and a sequel and companion game, Fate/Extra CCC, was released on March 28, 2013. An online RPG titled Fate/Grand Order was released on Android on July 29, 2015, followed by an August 12 release on iOS; an anime film adaptation by Lay-duce was released on December 31, 2016, with sequel adaptations by Production I.G., CloverWorks and Signal.MD. As of July 2021, Fate/Grand Order grossed $5.6 billion worldwide, making it the eighth highest-grossing mobile game of all time.

    -

    Fate/stay night's gameplay requires little interaction from the player as most of the game's duration is spent reading the text that appears, representing either dialogue between the characters, narration, or the inner thoughts of the protagonist. Often, players will come to a "decision point" where they are given a chance to choose from options displayed on the screen, typically two to three at a time. The time between these decision points is variable. During these times, gameplay pauses until a choice is made that furthers the plot in a specific direction. There are three main plot lines that the player will have the chance to experience, one for each of the heroines in the story. To view all three plot lines, the player must replay the game multiple times and choose different choices during the decision points to progress the plot in an alternate direction. Finishing one route will unlock the next one. When interacting with the heroines in each route, an "affection meter" is created, which is raised by giving them an answer that pleases them. A "True Ending" can be unlocked depending on the player's affection.[2]

    -

    Kinoko Nasu first began writing Fate/stay night in college and had not intended it to be a game. Initially, Nasu only wrote what would become the game's Fate storyline.[15] However, the game went on to have three storylines, the Fate storyline being one of them. In his early drafts, Fate's heroine Saber was a man, and the protagonist was a girl with glasses.[16] This early draft was embodied in the short original video animation (OVA) Fate/Prototype, which was released with the final volume of the Carnival Phantasm OVA series.[17] Nasu set aside the project and went on to found Type-Moon with artist Takashi Takeuchi. After the success of their first visual novel Tsukihime in 2000, Type-Moon transitioned from a dōjin soft organization to a commercial organization. Nasu and Takeuchi decided to turn the old Fate story into a visual novel as Type-Moon's first commercial product. In the beginning, Nasu was worried that because the main character was a girl, the story might not work as a bishōjo game. Artist Takeuchi suggested switching the protagonist's and Saber's genders to fit the game market.[15]

    -

    The first two story arcs completed were Fate and Unlimited Blade Works; the latter was partially presented to the public in a preview booklet at Comiket in December 2001.[21] Unlimited Blade Works was based on the idea of a character's confrontation with himself and his own ideals, something unrealized during the development of Tsukihime for the arc of Yumizuka Satsuki.[22] In 2002, it was found that the content that was already written was nearly equal in length to Tsukihime, leading to proposals to divide the game into two parts. However, due to the high cost of releasing two products at once, the arcs of Illya and Sakura were partially combined, resulting in Heaven's Feel.[23][24] Nasu originally thought of extending the Fate route with an alternative Fifth Holy Grail War where Shirou fought alongside Saber without a romantic relationship developing between them. Following their separation, Shirou would bond with Rin in a similar way to the true ending of Unlimited Blade Works.[25] The main theme in Fate/stay night is "conquering oneself". There are three storylines in the visual novel; each has a different theme. The first one, Fate, is the "oneself as an ideal." The second one, Unlimited Blade Works, is "struggling with oneself as an ideal." The third one, Heaven's Feel, is "the friction with real and ideal".[15]

    -

    According to Nasu, the main theme of the resulting Heaven's Feel arc was chosen to apply the protagonist's ideas in practice. This contrasts with Fate and Unlimited Blade Works, which paid most attention to demonstrating Shirou's ideals.[26] Nasu wanted to portray him as a typical teenager, while artist Takashi Takeuchi did not want him to have too much individuality, so that players could project themselves onto him.[27] In 2002, Takeuchi suggested bringing in Gen Urobuchi, a well-known author of Nitroplus visual novels, to work on the preliminary scenario of the game, but Urobuchi ultimately refused. Afterward, Nasu decided that Fate/stay night would be the most significant work in his life, created by him from beginning to end.[19]

    -

    After translating the text into code, editing background images and sprites, and debugging audio-visual effects, on October 21, 2003, the game's demo version was released on a CD with the magazine Tech Gian from Enterbrain,[28] and on November 1 was posted on Type-Moon's site.[29] Fate/stay night was released in Japan on January 30, 2004, for Windows PCs.[30]

    -
    -
    \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Linqer4ActivationKey Why You Need This Powerful Tool for Your Data Queries.md b/spaces/rorallitri/biomedical-language-models/logs/Linqer4ActivationKey Why You Need This Powerful Tool for Your Data Queries.md deleted file mode 100644 index a39f455b5e425b015f97a9c03d346f08804be5d3..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Linqer4ActivationKey Why You Need This Powerful Tool for Your Data Queries.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Linqer4ActivationKey


    Download https://tinurll.com/2uznL6



    -
    -
    -
    -

    diff --git a/spaces/rsandadi/BearDetector/README.md b/spaces/rsandadi/BearDetector/README.md deleted file mode 100644 index c66a6041e4aa8c601c73847870b4930e85288233..0000000000000000000000000000000000000000 --- a/spaces/rsandadi/BearDetector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Minima -emoji: 💩 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/russel0719/deepfake_detector/training/datasets/classifier_dataset.py b/spaces/russel0719/deepfake_detector/training/datasets/classifier_dataset.py deleted file mode 100644 index 28bcb6c6418ea0d0da05366704af5d0945a0f953..0000000000000000000000000000000000000000 --- a/spaces/russel0719/deepfake_detector/training/datasets/classifier_dataset.py +++ /dev/null @@ -1,378 +0,0 @@ -import math -import os -import random -import sys -import traceback - -import cv2 -import numpy as np -import pandas as pd -import skimage.draw -from albumentations import ImageCompression, OneOf, GaussianBlur, Blur -from albumentations.augmentations.functional import image_compression, rot90 -from albumentations.pytorch.functional import img_to_tensor -from scipy.ndimage import binary_erosion, binary_dilation -from skimage import measure -from torch.utils.data import Dataset -import dlib - -from training.datasets.validation_set import PUBLIC_SET - - -def prepare_bit_masks(mask): - h, w = mask.shape - mid_w = w // 2 - mid_h = w // 2 - masks = [] - ones = np.ones_like(mask) - ones[:mid_h] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[mid_h:] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:, :mid_w] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:, mid_w:] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:mid_h, :mid_w] = 0 - ones[mid_h:, mid_w:] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:mid_h, mid_w:] = 0 - ones[mid_h:, :mid_w] = 0 - masks.append(ones) - return masks - - -detector = dlib.get_frontal_face_detector() -predictor = dlib.shape_predictor('libs/shape_predictor_68_face_landmarks.dat') - - -def blackout_convex_hull(img): - try: - rect = detector(img)[0] - sp = predictor(img, rect) - landmarks = np.array([[p.x, p.y] for p in sp.parts()]) - outline = landmarks[[*range(17), *range(26, 16, -1)]] - Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0]) - cropped_img = np.zeros(img.shape[:2], dtype=np.uint8) - cropped_img[Y, X] = 1 - # if random.random() > 0.5: - # img[cropped_img == 0] = 0 - # #leave only face - # return img - - y, x = measure.centroid(cropped_img) - y = int(y) - x = int(x) - first = random.random() > 0.5 - if random.random() > 0.5: - if first: - cropped_img[:y, :] = 0 - else: - cropped_img[y:, :] = 0 - else: - if first: - cropped_img[:, :x] = 0 - else: - cropped_img[:, x:] = 0 - - img[cropped_img > 0] = 0 - except Exception as e: - pass - - -def dist(p1, p2): - return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) - - -def remove_eyes(image, landmarks): - image = image.copy() - (x1, y1), (x2, y2) = landmarks[:2] - mask = np.zeros_like(image[..., 0]) - line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) - w = dist((x1, y1), (x2, y2)) - dilation = int(w // 4) - line = binary_dilation(line, iterations=dilation) - image[line, :] = 0 - return image - - -def remove_nose(image, landmarks): - image = image.copy() - (x1, y1), (x2, y2) = landmarks[:2] - x3, y3 = 
landmarks[2] - mask = np.zeros_like(image[..., 0]) - x4 = int((x1 + x2) / 2) - y4 = int((y1 + y2) / 2) - line = cv2.line(mask, (x3, y3), (x4, y4), color=(1), thickness=2) - w = dist((x1, y1), (x2, y2)) - dilation = int(w // 4) - line = binary_dilation(line, iterations=dilation) - image[line, :] = 0 - return image - - -def remove_mouth(image, landmarks): - image = image.copy() - (x1, y1), (x2, y2) = landmarks[-2:] - mask = np.zeros_like(image[..., 0]) - line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) - w = dist((x1, y1), (x2, y2)) - dilation = int(w // 3) - line = binary_dilation(line, iterations=dilation) - image[line, :] = 0 - return image - - -def remove_landmark(image, landmarks): - if random.random() > 0.5: - image = remove_eyes(image, landmarks) - elif random.random() > 0.5: - image = remove_mouth(image, landmarks) - elif random.random() > 0.5: - image = remove_nose(image, landmarks) - return image - - -def change_padding(image, part=5): - h, w = image.shape[:2] - # original padding was done with 1/3 from each side, too much - pad_h = int(((3 / 5) * h) / part) - pad_w = int(((3 / 5) * w) / part) - image = image[h // 5 - pad_h:-h // 5 + pad_h, w // 5 - pad_w:-w // 5 + pad_w] - return image - - -def blackout_random(image, mask, label): - binary_mask = mask > 0.4 * 255 - h, w = binary_mask.shape[:2] - - tries = 50 - current_try = 1 - while current_try < tries: - first = random.random() < 0.5 - if random.random() < 0.5: - pivot = random.randint(h // 2 - h // 5, h // 2 + h // 5) - bitmap_msk = np.ones_like(binary_mask) - if first: - bitmap_msk[:pivot, :] = 0 - else: - bitmap_msk[pivot:, :] = 0 - else: - pivot = random.randint(w // 2 - w // 5, w // 2 + w // 5) - bitmap_msk = np.ones_like(binary_mask) - if first: - bitmap_msk[:, :pivot] = 0 - else: - bitmap_msk[:, pivot:] = 0 - - if label < 0.5 and np.count_nonzero(image * np.expand_dims(bitmap_msk, axis=-1)) / 3 > (h * w) / 5 \ - or np.count_nonzero(binary_mask * bitmap_msk) > 40: - mask *= bitmap_msk - image *= np.expand_dims(bitmap_msk, axis=-1) - break - current_try += 1 - return image - - -def blend_original(img): - img = img.copy() - h, w = img.shape[:2] - rect = detector(img) - if len(rect) == 0: - return img - else: - rect = rect[0] - sp = predictor(img, rect) - landmarks = np.array([[p.x, p.y] for p in sp.parts()]) - outline = landmarks[[*range(17), *range(26, 16, -1)]] - Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0]) - raw_mask = np.zeros(img.shape[:2], dtype=np.uint8) - raw_mask[Y, X] = 1 - face = img * np.expand_dims(raw_mask, -1) - - # add warping - h1 = random.randint(h - h // 2, h + h // 2) - w1 = random.randint(w - w // 2, w + w // 2) - while abs(h1 - h) < h // 3 and abs(w1 - w) < w // 3: - h1 = random.randint(h - h // 2, h + h // 2) - w1 = random.randint(w - w // 2, w + w // 2) - face = cv2.resize(face, (w1, h1), interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])) - face = cv2.resize(face, (w, h), interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])) - - raw_mask = binary_erosion(raw_mask, iterations=random.randint(4, 10)) - img[raw_mask, :] = face[raw_mask, :] - if random.random() < 0.2: - img = OneOf([GaussianBlur(), Blur()], p=0.5)(image=img)["image"] - # image compression - if random.random() < 0.5: - img = ImageCompression(quality_lower=40, quality_upper=95)(image=img)["image"] - return img - - -class DeepFakeClassifierDataset(Dataset): - - def __init__(self, - data_path="/mnt/sota/datasets/deepfake", - fold=0, - label_smoothing=0.01, 
- padding_part=3, - hardcore=True, - crops_dir="crops", - folds_csv="folds.csv", - normalize={"mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225]}, - rotation=False, - mode="train", - reduce_val=True, - oversample_real=True, - transforms=None - ): - super().__init__() - self.data_root = data_path - self.fold = fold - self.folds_csv = folds_csv - self.mode = mode - self.rotation = rotation - self.padding_part = padding_part - self.hardcore = hardcore - self.crops_dir = crops_dir - self.label_smoothing = label_smoothing - self.normalize = normalize - self.transforms = transforms - self.df = pd.read_csv(self.folds_csv) - self.oversample_real = oversample_real - self.reduce_val = reduce_val - - def __getitem__(self, index: int): - - while True: - video, img_file, label, ori_video, frame, fold = self.data[index] - try: - if self.mode == "train": - label = np.clip(label, self.label_smoothing, 1 - self.label_smoothing) - img_path = os.path.join(self.data_root, self.crops_dir, video, img_file) - image = cv2.imread(img_path, cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - mask = np.zeros(image.shape[:2], dtype=np.uint8) - diff_path = os.path.join(self.data_root, "diffs", video, img_file[:-4] + "_diff.png") - try: - msk = cv2.imread(diff_path, cv2.IMREAD_GRAYSCALE) - if msk is not None: - mask = msk - except: - print("not found mask", diff_path) - pass - if self.mode == "train" and self.hardcore and not self.rotation: - landmark_path = os.path.join(self.data_root, "landmarks", ori_video, img_file[:-4] + ".npy") - if os.path.exists(landmark_path) and random.random() < 0.7: - landmarks = np.load(landmark_path) - image = remove_landmark(image, landmarks) - elif random.random() < 0.2: - blackout_convex_hull(image) - elif random.random() < 0.1: - binary_mask = mask > 0.4 * 255 - masks = prepare_bit_masks((binary_mask * 1).astype(np.uint8)) - tries = 6 - current_try = 1 - while current_try < tries: - bitmap_msk = random.choice(masks) - if label < 0.5 or np.count_nonzero(mask * bitmap_msk) > 20: - mask *= bitmap_msk - image *= np.expand_dims(bitmap_msk, axis=-1) - break - current_try += 1 - if self.mode == "train" and self.padding_part > 3: - image = change_padding(image, self.padding_part) - valid_label = np.count_nonzero(mask[mask > 20]) > 32 or label < 0.5 - valid_label = 1 if valid_label else 0 - rotation = 0 - if self.transforms: - data = self.transforms(image=image, mask=mask) - image = data["image"] - mask = data["mask"] - if self.mode == "train" and self.hardcore and self.rotation: - # landmark_path = os.path.join(self.data_root, "landmarks", ori_video, img_file[:-4] + ".npy") - dropout = 0.8 if label > 0.5 else 0.6 - if self.rotation: - dropout *= 0.7 - elif random.random() < dropout: - blackout_random(image, mask, label) - - # - # os.makedirs("../images", exist_ok=True) - # cv2.imwrite(os.path.join("../images", video+ "_" + str(1 if label > 0.5 else 0) + "_"+img_file), image[...,::-1]) - - if self.mode == "train" and self.rotation: - rotation = random.randint(0, 3) - image = rot90(image, rotation) - - image = img_to_tensor(image, self.normalize) - return {"image": image, "labels": np.array((label,)), "img_name": os.path.join(video, img_file), - "valid": valid_label, "rotations": rotation} - except Exception as e: - traceback.print_exc(file=sys.stdout) - print("Broken image", os.path.join(self.data_root, self.crops_dir, video, img_file)) - index = random.randint(0, len(self.data) - 1) - - def random_blackout_landmark(self, image, mask, landmarks): - x, y = 
random.choice(landmarks) - first = random.random() > 0.5 - # crop half face either vertically or horizontally - if random.random() > 0.5: - # width - if first: - image[:, :x] = 0 - mask[:, :x] = 0 - else: - image[:, x:] = 0 - mask[:, x:] = 0 - else: - # height - if first: - image[:y, :] = 0 - mask[:y, :] = 0 - else: - image[y:, :] = 0 - mask[y:, :] = 0 - - def reset(self, epoch, seed): - self.data = self._prepare_data(epoch, seed) - - def __len__(self) -> int: - return len(self.data) - - def _prepare_data(self, epoch, seed): - df = self.df - if self.mode == "train": - rows = df[df["fold"] != self.fold] - else: - rows = df[df["fold"] == self.fold] - seed = (epoch + 1) * seed - if self.oversample_real: - rows = self._oversample(rows, seed) - if self.mode == "val" and self.reduce_val: - # every 2nd frame, to speed up validation - rows = rows[rows["frame"] % 20 == 0] - # another option is to use public validation set - #rows = rows[rows["video"].isin(PUBLIC_SET)] - - print( - "real {} fakes {} mode {}".format(len(rows[rows["label"] == 0]), len(rows[rows["label"] == 1]), self.mode)) - data = rows.values - - np.random.seed(seed) - np.random.shuffle(data) - return data - - def _oversample(self, rows: pd.DataFrame, seed): - real = rows[rows["label"] == 0] - fakes = rows[rows["label"] == 1] - num_real = real["video"].count() - if self.mode == "train": - fakes = fakes.sample(n=num_real, replace=False, random_state=seed) - return pd.concat([real, fakes]) diff --git a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.py b/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.py deleted file mode 100644 index 9ca1f5c72098debfb0ffa1ba1b81eb92eb64d428..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.py +++ /dev/null @@ -1,198 +0,0 @@ -import os -import platform - -import torch -from torch.nn import functional as F -from torch.autograd import Function -from torch.utils.cpp_extension import load - -use_fallback = False - -# Try loading precompiled, otherwise use native fallback -try: - import upfirdn2d_op -except ModuleNotFoundError as e: - print('StyleGAN2: Optimized CUDA op UpFirDn2d not available, using native PyTorch fallback.') - use_fallback = True - -class UpFirDn2dBackward(Function): - @staticmethod - def forward( - ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size - ): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_op.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_op.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = 
gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view( - ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1] - ) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_op.upfirdn2d( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 - ) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - if use_fallback or input.device.type == "cpu": - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - else: - out = UpFirDn2d.apply( - input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]) - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/scedlatioru/img-to-music/example/Danganronpa V3 Killing Harmony Password !NEW!.md b/spaces/scedlatioru/img-to-music/example/Danganronpa V3 Killing Harmony Password !NEW!.md deleted file mode 100644 index 
50360607bf360bd6801497d6abab9c5ad1ef64da..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Danganronpa V3 Killing Harmony Password !NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    Danganronpa has become a staple in the gaming industry, but I must say it's helping to draw new attention to the Phoenix Wright series. If you loved V3, come and check out some ads for V3 and see if you can catch a glimpse of a familiar face.

    -

    Danganronpa V3: Killing Harmony Password


    Download ○○○ https://gohhs.com/2uEz2v



    -

    As Kazumi Akao stepped down as the writer of Danganronpa, the potential to have Monokuma continue his plan has become somewhat more likely. More importantly, the reason he gave for Akao's characters to become the Remnants of Despair in the first place is now supposedly gone, since the characters have been revived and are now awaiting a new plan. The possibility that the Remnants of Despair will be as influential in the remaining games as they were in Danganronpa 3's storyline is very high. As the other 3 characters have claimed, it seems that Despair was acting under their own will, with Hope only influencing their actions.

    From the time I wrote my other Gravity War post to the time I started to post this one, the Gravity War project has shifted significantly. I started to work on the final battle between Vitriol and Monokuma, only to realize the sheer amount of work that would be involved in how to show the battle. Sure, we got a trailer, but I lacked the necessary technical skills to produce a full-blown battle, so I just published it as a side project that ended up on the State of Decay Wiki. I did eventually release a non-battle version of the trailer, but it would take quite a long time before it was polished and rendered with proper lighting, which is what the rest of the Gravity War project is based around.

    -
    -
    \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Ontrack EasyRecovery Pro 13.0.0.0 Crack UPD.md b/spaces/scedlatioru/img-to-music/example/Ontrack EasyRecovery Pro 13.0.0.0 Crack UPD.md deleted file mode 100644 index e76912f5d915ae41e55b06b42bbcde4b7e589e42..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Ontrack EasyRecovery Pro 13.0.0.0 Crack UPD.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Ontrack EasyRecovery Pro 13.0.0.0 Crack


    Download File –––––>>> https://gohhs.com/2uEyV9



    -
    -
    -
    -
    -

    diff --git a/spaces/sdhsdhk/bingosjj/src/components/chat-notification.tsx b/spaces/sdhsdhk/bingosjj/src/components/chat-notification.tsx deleted file mode 100644 index 4be24d0f1755c8058698cfa66c736d8d4792475a..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/components/chat-notification.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => void) { - if (error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( -
    - 你已达到每日最大发送消息次数,请更换账号或隔一天后重试 -
    - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - - 你的账号已在黑名单,请尝试更换账号及申请解封 - - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( -
    - 当前话题已中止,请点 - 重新开始 - 开启新的对话 -
    - ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - - 点击通过人机验证 - - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - 没有获取到身份信息或身份信息失效,点此重新设置 - ) - } - return error.message -} - -export function ChatNotification({ message, bot }: ChatNotificationProps) { - useEffect(() => { - window.scrollBy(0, 2000) - }, [message]) - - if (!message?.error) return - - return ( -
    -
    -
    -
    -
    - error - {getAction(message.error, () => bot.resetConversation())} -
    -
    -
    -
    -
    - ) -} diff --git a/spaces/shengyi-qian/3DOI/monoarti/detr/__init__.py b/spaces/shengyi-qian/3DOI/monoarti/detr/__init__.py deleted file mode 100644 index a3f26531befaf6abb215e48a0ef4bfc3da1c7c04..0000000000000000000000000000000000000000 --- a/spaces/shengyi-qian/3DOI/monoarti/detr/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .detr import build - - -def build_model(args): - return build(args) diff --git a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/__init__.py b/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Apktool M Pro APK The Best Way to Work with Android Installation Packages.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Apktool M Pro APK The Best Way to Work with Android Installation Packages.md deleted file mode 100644 index fa5a5649a64f2d7ae3cbb3962771ad850008cbea..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Apktool M Pro APK The Best Way to Work with Android Installation Packages.md +++ /dev/null @@ -1,156 +0,0 @@ - -

    Apktool M Pro APK: A Powerful Tool for Android Developers and Modders

    -

    If you are an Android developer or a modder who likes to tweak and customize your Android apps, you might have heard of Apktool, a tool that allows you to decompile and recompile Android application packages (APKs). However, Apktool can be complicated and tedious to use, especially if you are not familiar with the command line interface. That's why you might want to try Apktool M Pro APK, a user-friendly and feature-rich app that lets you do everything that Apktool can do, and more.

    -

    What is Apktool M Pro APK?

    -

    Apktool M Pro APK is an Android app that allows you to decompile and compile Android installation packages (.apk), including system applications. It also lets you edit, merge, split, sign, and verify APK files with ease. It is based on the original Apktool project, but it has a graphical user interface (GUI) that makes it easier to use. It also has some additional features that make it more powerful and versatile than Apktool.

    -

    apktool m pro apk


    DOWNLOAD 🆗 https://ssurll.com/2uNXlc



    -

    Apktool M Pro APK Features

    -

    Some of the features of Apktool M Pro APK are:

    -
      -
    • Customizable text editor with syntax highlighting and tooltips, the ability to view Java source code, and support for importing your own highlighting schemes.
    • Convenient file manager with many features.
    • Works with various types of installation files: *.apk, *.apks, *.xapk, *.apkm.
    • Antisplit: merges an Android App Bundle (split APK) into a single installation file, and supports inserting and removing such files.
    • Create your own signature and sign applications with it.
    • Fast editing of the application name, package name (when cloning an app), app icons, and more, without the need to rebuild.
    • Root access is not needed.
    • The application is completely free, supports older devices starting from Android 4.0, and contains no advertising or analytics.
    -

    Apktool M Pro APK Benefits

    -

    Some of the benefits of using Apktool M Pro APK are:

    -
      -
    • You can modify and customize your Android apps according to your preferences and needs.
    • You can learn how Android apps work by viewing their source code and resources.
    • You can create your own mods and share them with others.
    • You can fix or improve some bugs or errors in your apps.
    • You can optimize your apps for better performance and battery life.
    -

    How to Download and Install Apktool M Pro APK?

    -

    If you want to try Apktool M Pro APK, you need to download and install it on your device. Here are the steps to do so:

    -

    Download Apktool M Pro APK from APKCombo

    -

    The easiest way to download Apktool M Pro APK is from APKCombo, a website that provides free and safe download links for various Android apps. Here are the steps to download Apktool M Pro APK from APKCombo:

    -
      -
    1. Go to the APKCombo website.
    2. Type "Apktool M" in the search box and press Enter.
    3. Select "Apktool M Pro APK" from the list of results and click on it.
    4. Scroll down to the bottom of the page and click on the "Download APK" button.
    5. Wait for the download to finish and save the file to your device.
    -

    Install Apktool M Pro APK on Android Device

    -

    After downloading Apktool M Pro APK, you need to install it on your Android device. Here are the steps to install Apktool M Pro APK on Android device:

    -
      -
    1. Go to your device settings and enable "Unknown sources" or "Allow installation of apps from unknown sources". This will allow you to install apps that are not from the Google Play Store.
    2. Locate the downloaded Apktool M Pro APK file on your device and tap on it.
    3. Follow the instructions on the screen and grant the necessary permissions to install the app.
    4. Wait for the installation to finish and launch the app from your app drawer or home screen.
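    If you are working from a computer instead, a quick alternative to tapping the file on the device is to sideload it with adb. The snippet below is only a sketch under a couple of assumptions: adb from the Android SDK platform-tools is installed and on your PATH, USB debugging is enabled on the device, and the file name is a placeholder for wherever you saved the download.

```python
# Hypothetical sketch: install a locally downloaded APK over USB with adb.
# "-r" reinstalls the app if it is already present, keeping its data.
import subprocess

subprocess.run(["adb", "install", "-r", "Apktool_M_Pro.apk"], check=True)
```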
    -

    Install Apktool M Pro APK on Windows PC

    -

    If you want to use Apktool M Pro APK on your Windows PC, you need to install an Android emulator first. An Android emulator is a software that allows you to run Android apps on your PC. There are many Android emulators available, but one of the most popular ones is BlueStacks. Here are the steps to install Apktool M Pro APK on Windows PC using BlueStacks:

    -
      -
    1. Go to BlueStacks website and download the latest version of BlueStacks for Windows.
    2. Run the installer and follow the instructions on the screen to install BlueStacks on your PC.
    3. Launch BlueStacks and sign in with your Google account or create a new one.
    4. Drag and drop the downloaded Apktool M Pro APK file into BlueStacks or click on the "Install APK" button at the bottom right corner of BlueStacks.
    5. Wait for the installation to finish and launch the app from the BlueStacks home screen or app center.
    -

    How to Use Apktool M Pro APK?

    -

    Now that you have installed Apktool M Pro APK, you can start using it to decompile and compile APK files, edit them with text editor, merge and split them with antisplit, sign and verify them with signature, and more. Here are some of the things you can do with Apktool M Pro APK:

    -

    Decompile and Compile APK Files

    -

    To decompile and compile APK files, follow these steps:

    -


    -
      -
    1. Launch Apktool M Pro APK and grant root access if prompted.
    2. Select the "Apktool" tab at the top of the app.
    3. Select an APK file from your device or browse for one using the file manager.
    4. Select "Decompile" or "Compile" depending on what you want to do. You can also choose whether to keep or delete resources, classes.dex, META-INF, etc.
    5. Wait for the process to finish and check the output folder for the decompiled or compiled APK file.
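    The steps above drive the app's graphical interface. For reference, the original command-line Apktool that Apktool M is based on covers the same decompile and recompile cycle. The sketch below assumes the apktool command is installed and on your PATH, and every file and folder name is a placeholder, so treat it as an illustration rather than a description of Apktool M itself.

```python
# Minimal sketch of the decompile / edit / recompile cycle with the
# command-line Apktool (not Apktool M). Paths are placeholders.
import subprocess

def decompile(apk_path: str, out_dir: str) -> None:
    # "apktool d" decodes resources and smali into a working directory;
    # "-f" overwrites the output directory if it already exists.
    subprocess.run(["apktool", "d", apk_path, "-o", out_dir, "-f"], check=True)

def recompile(src_dir: str, out_apk: str) -> None:
    # "apktool b" rebuilds the edited directory into an unsigned APK.
    subprocess.run(["apktool", "b", src_dir, "-o", out_apk], check=True)

if __name__ == "__main__":
    decompile("example.apk", "example_src")
    # ... edit the files under example_src/ here ...
    recompile("example_src", "example_rebuilt.apk")
```

    Note that the rebuilt APK is unsigned, which is why the app also offers the signing feature covered later in this article.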
    -

    Edit APK Files with Text Editor

    -

    To edit APK files with text editor, follow these steps:

    -
      -
    1. Launch Apktool M Pro APK and grant root access if prompted.
    2. Select the "Text Editor" tab at the top of the app.
    3. Select an APK file from your device or browse for one using the file manager.
    4. Select a file or folder inside the APK file that you want to edit. You can also search for a specific file or folder using the search bar.
    5. Edit the file or folder as you wish using the text editor. You can use syntax highlighting, tooltips, undo/redo, find/replace, etc. You can also view java source code if available.
    6. Save your changes and exit the text editor. You can also compile or sign your edited APK file using the buttons at the bottom of the text editor.
    -

    Merge and Split APK Files with Antisplit

    -

    To merge and split APK files with antisplit, follow these steps:

    -
      -
    1. Launch Apktool M Pro APK and grant root access if prompted.
    2. Select the "Antisplit" tab at the top of the app.
    3. Select an APK file from your device or browse for one using the file manager. You can also select multiple APK files to merge them into one.
    4. Select "Merge" or "Split" depending on what you want to do. You can also choose whether to keep or delete resources, classes.dex, META-INF, etc.
    5. Wait for the process to finish and check the output folder for the merged or split APK file.
    -

    Sign and Verify APK Files with Signature

    -

    To sign and verify APK files with signature, follow these steps:

    -
    1. Launch Apktool M Pro APK and grant root access if prompted.
    2. Select the "Signature" tab at the top of the app.
    3. Select an APK file from your device or browse for one using the file manager.
    4. Select "Sign" or "Verify" depending on what you want to do. You can also choose whether to use your own signature or the default one.
    5. Wait for the process to finish and check the output folder for the signed or verified APK file.
    -
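    On a computer, the same signing and verification can be done with apksigner from the Android build-tools. This is a minimal sketch, assuming apksigner is on your PATH and you have already created a keystore (for example with keytool); the file names are placeholders.

```python
import subprocess

apk = "target_rebuilt.apk"        # placeholder APK to sign
keystore = "release.keystore"     # placeholder keystore created beforehand

# sign the APK in place; apksigner will prompt for the keystore password
subprocess.run(["apksigner", "sign", "--ks", keystore, apk], check=True)

# print the signing certificates and fail if the signature is invalid
subprocess.run(["apksigner", "verify", "--print-certs", apk], check=True)
```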

    Conclusion

    -

    Apktool M Pro APK is a powerful tool for Android developers and modders who want to decompile and compile APK files, edit them with the built-in text editor, merge and split them with the Antisplit feature, sign and verify them with the Signature tool, and more. It is based on the original Apktool project, but it adds a graphical user interface that makes it easier to use, along with extra features that make it more powerful and versatile than Apktool alone. It is completely free, does not require root access, and contains no ads or analytics. If you want to try Apktool M Pro APK, you can download it from APKCombo and install it on your Android device, or on a Windows PC using an Android emulator. You can also visit the official website of Apktool M Pro APK for more information and support.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Apktool M Pro APK:

    -
    • What is the difference between Apktool M Pro APK and Apktool?
      Apktool M Pro APK is based on Apktool, but it has a graphical user interface that makes it easier to use. It also has some additional features that make it more powerful and versatile than Apktool, such as text editor, antisplit, signature, etc.
    • Is Apktool M Pro APK safe to use?
      Apktool M Pro APK is safe to use as long as you download it from a trusted source like APKCombo. It does not contain any malware, viruses, or spyware. However, you should be careful when modifying or installing APK files that are not from the Google Play Store, as they may contain harmful code or violate some terms of service.
    • Do I need root access to use Apktool M Pro APK?
      No, you do not need root access to use Apktool M Pro APK. However, some features may require root access, such as editing system apps or accessing protected files.
    • Can I use Apktool M Pro APK on my PC?
      Yes, you can use Apktool M Pro APK on your PC by installing an Android emulator like BlueStacks. This will allow you to run Android apps on your PC.
    • Where can I get support for Apktool M Pro APK?
      You can get support for Apktool M Pro APK by visiting the official website, where you can find tutorials, FAQs, feedback, bug reports, etc. You can also join the Telegram group, where you can chat with other users and developers of Apktool M Pro APK.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Simulator 2 dinheiro infinito o que voc precisa saber antes de baixar.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Simulator 2 dinheiro infinito o que voc precisa saber antes de baixar.md deleted file mode 100644 index 790d5c88b91752b4e41745317fb01fb5279334d4..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Simulator 2 dinheiro infinito o que voc precisa saber antes de baixar.md +++ /dev/null @@ -1,123 +0,0 @@ - -

    How to Download Car Simulator 2 Dinheiro Infinito and Enjoy Unlimited Money

    -

    If you are a fan of car simulation games, you might have heard of Car Simulator 2, a realistic and immersive game that lets you drive various cars in an open world. You can explore the city, race against other players, complete missions, buy new cars, upgrade them, and even own a house. But what if you want to enjoy all these features without spending any real money? Well, there is a way to do that: by downloading Car Simulator 2 Dinheiro Infinito, a modded version of the game that gives you unlimited money. In this article, we will show you how to download Car Simulator 2 Dinheiro Infinito for Android and iOS devices, how to play online with other players, and how to spend your unlimited money in the game.

    -

    download car simulator 2 dinheiro infinito


    Download File ———>>> https://ssurll.com/2uNUj6



    -

    What is Car Simulator 2 Dinheiro Infinito?

    -

    A brief introduction to the game and its features

    -

    Car Simulator 2 is a popular car simulation game developed by Oppana Games. It is available for free on Google Play Store and App Store. The game has over 50 million downloads and a 4.4-star rating on both platforms. The game offers realistic graphics, physics, and sounds, as well as a variety of cars to choose from. You can drive sports cars, muscle cars, SUVs, trucks, and more. You can also customize your cars with different colors, wheels, spoilers, and stickers.

    -

    The game has two modes: offline and online. In offline mode, you can explore the city at your own pace, complete missions, earn money, and buy new cars. In online mode, you can join multiplayer races and challenges with other players from around the world. You can also chat with them, join clubs, and compete on leaderboards.

    -

    How to get unlimited money in the game

    -

    As you play the game, you will need money to buy new cars, upgrade them, rent or buy garages, and even buy a house. You can earn money by completing missions, winning races, or watching ads. However, these methods are slow and tedious. If you want to get unlimited money in the game, you will need to download Car Simulator 2 Dinheiro Infinito.

    -

    Car Simulator 2 Dinheiro Infinito is a modded version of the game that gives you unlimited money from the start. You don't need to complete any missions or watch any ads. You can buy any car you want, upgrade it to the max level, rent or buy any garage or house you like, and enjoy the game without any limitations.

    -

    download car simulator 2 dinheiro infinito apk
    -download car simulator 2 dinheiro infinito mod
    -download car simulator 2 dinheiro infinito android
    -download car simulator 2 dinheiro infinito ios
    -download car simulator 2 dinheiro infinito hack
    -download car simulator 2 dinheiro infinito atualizado
    -download car simulator 2 dinheiro infinito e diamantes
    -download car simulator 2 dinheiro infinito e gasolina
    -download car simulator 2 dinheiro infinito e tudo desbloqueado
    -download car simulator 2 dinheiro infinito e vip
    -download car simulator 2 dinheiro infinito gratis
    -download car simulator 2 dinheiro infinito mediafıre
    -download car simulator 2 dinheiro infinito mega
    -download car simulator 2 dinheiro infinito online
    -download car simulator 2 dinheiro infinito pc
    -download car simulator 2 dinheiro infinito sem root
    -download car simulator 2 dinheiro infinito versão mais recente
    -como baixar car simulator 2 dinheiro infinito
    -como instalar car simulator 2 dinheiro infinito
    -como jogar car simulator 2 dinheiro infinito
    -como ter car simulator 2 dinheiro infinito
    -descargar car simulator 2 dinero infinito
    -descargar car simulator 2 dinero infinito apk
    -descargar car simulator 2 dinero infinito mod
    -descargar car simulator 2 dinero infinito android
    -descargar car simulator 2 dinero infinito ios
    -descargar car simulator 2 dinero infinito hack
    -descargar car simulator 2 dinero infinito actualizado
    -descargar car simulator 2 dinero infinito y diamantes
    -descargar car simulator 2 dinero infinito y gasolina
    -descargar car simulator 2 dinero infinito y todo desbloqueado
    -descargar car simulator 2 dinero infinito y vip
    -descargar car simulator 2 dinero infinito gratis
    -descargar car simulator 2 dinero infinito mediafıre
    -descargar car simulator 2 dinero infinito mega
    -descargar car simulator 2 dinero infinito online
    -descargar car simulator 2 dinero infinito pc
    -descargar car simulator 2 dinero infinito sin root
    -descargar car simulator 2 dinero infinito ultima version
    -como descargar car simulator 2 dinero infinito
    -como instalar car simulator 2 dinero infinito
    -como jugar car simulator 2 dinero infinito
    -como tener car simulator 2 dinero infinito

    -

    How to Download Car Simulator 2 Dinheiro Infinito for Android and iOS

    -

    The steps to download and install the modded version of the game

    -

    If you want to download Car Simulator 2 Dinheiro Infinito for your Android or iOS device, you will need to follow these steps:

    -
    1. Uninstall the original version of Car Simulator 2 from your device if you have it installed.
    2. Go to [this link] on your device's browser. This is the official website of Car Simulator 2 Dinheiro Infinito.
    3. Click on the download button and choose the version that matches your device's operating system. You will see two options: Android and iOS.
    4. Wait for the download to finish. It may take a few minutes depending on your internet speed.
    5. Once the download is done, locate the file on your device and tap on it to install it. You may need to enable unknown sources or trust the developer in your device's settings.
    6. After the installation is complete, open the game and enjoy unlimited money.
    -
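    Before tapping Install, it is worth comparing the file's checksum with the one published on the download page, if the site lists one. This is a small, generic sketch using Python's hashlib; the file name and the expected hash are placeholders you would replace with your own values.

```python
import hashlib
from pathlib import Path

apk = Path("car-simulator-2-mod.apk")                      # placeholder file name
expected = "paste-the-hash-from-the-download-page-here"    # placeholder value

digest = hashlib.sha256()
with apk.open("rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

actual = digest.hexdigest()
print("computed:", actual)
print("match" if actual == expected.lower() else "MISMATCH - do not install")
```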

    The benefits and risks of using the modded version

    -

    Using Car Simulator 2 Dinheiro Infinito has some benefits and risks that you should be aware of before downloading it. Here are some of them:

    | Benefits | Risks |
    | --- | --- |
    | You can enjoy unlimited money and buy any car, upgrade, garage, or house you want. | You may lose your progress and data if you uninstall the modded version or switch to the original version. |
    | You can skip the ads and save your time and battery. | You may face some bugs, glitches, or crashes that affect your gameplay experience. |
    | You can have more fun and freedom in the game without any restrictions. | You may get banned from the online mode if the developers detect that you are using a modded version. |
    -

    How to Play Car Simulator 2 Dinheiro Infinito Online with Other Players

    -

    The online mode of the game and how it works

    -

    Car Simulator 2 Dinheiro Infinito also has an online mode that allows you to play with other players from around the world. You can join multiplayer races and challenges, chat with other players, join clubs, and compete on leaderboards. The online mode works similarly to the original version of the game, except that you have unlimited money and can use any car you want.

    -

    To play online, you need to have a stable internet connection and an account. You can create an account using your email, Facebook, or Google. Once you have an account, you can access the online mode by tapping on the globe icon on the main menu. You will see a list of available servers that you can join. You can also create your own server and invite your friends to join you.

    -

    The tips and tricks to win online races and challenges

    -

    If you want to win online races and challenges, you need to have some skills and strategies. Here are some tips and tricks that can help you:

    -
    • Choose a car that suits your driving style and the terrain of the race. Some cars are faster, some are more agile, some are more durable, etc.
    • Upgrade your car to improve its performance and appearance. You can upgrade its engine, transmission, brakes, suspension, tires, etc.
    • Use nitro wisely. Nitro can give you a boost of speed, but it also consumes fuel. You can refill your nitro by driving fast or drifting.
    • Drift as much as possible. Drifting can help you turn corners faster, avoid obstacles, and earn more money and nitro.
    • Avoid crashing into other cars or objects. Crashing can damage your car and slow you down. You can repair your car by driving into a gas station or a garage.
    • Follow the map and the arrows. They will show you the shortest and best route to reach your destination.
    • Be respectful and friendly to other players. Don't spam messages, don't cheat, don't insult others, etc.
    -

    How to Spend Your Unlimited Money in Car Simulator 2 Dinheiro Infinito

    -

    The best cars, upgrades, garages, and houses to buy in the game

    -

    With unlimited money in Car Simulator 2 Dinheiro Infinito, you can buy anything you want in the game. Here are some of the best things to buy:

    • The best cars: Some of the best cars in the game are the Lamborghini Aventador SVJ, Bugatti Chiron, Ferrari LaFerrari, McLaren P1, Pagani Huayra, Koenigsegg Agera RS, etc. These cars are fast, powerful, beautiful, and expensive.
    • The best upgrades: Some of the best upgrades in the game are turbocharger, supercharger, nitrous oxide, carbon fiber body, spoiler, neon lights, etc. These upgrades can improve your car's performance and appearance and make it stand out from the crowd.
    • The best garages: Some of the best garages in the game are the penthouse garage, the underground garage, the airport garage, the beach garage, etc. These garages can store more cars, have better security, and have better views.
    • The best houses: Some of the best houses in the game are the mansion, the villa, the penthouse, the yacht, etc. These houses are luxurious, spacious, and have various amenities.

    The fun and creative ways to customize your cars and houses

    -

    Besides buying new cars and houses, you can also customize them to suit your taste and personality. Here are some fun and creative ways to do that:

    • Customize your cars with different colors, wheels, spoilers, stickers, etc. You can also change the license plate, the horn sound, the exhaust sound, etc.
    • Customize your houses with different furniture, decorations, paintings, plants, etc. You can also change the wallpaper, the floor, the curtains, etc.
    • Customize your garage with different tools, equipment, posters, trophies, etc. You can also change the lighting, the music, the floor, etc.

    Conclusion

    -

    Car Simulator 2 Dinheiro Infinito is a modded version of Car Simulator 2 that gives you unlimited money in the game. You can download it for free from [this link] for your Android or iOS device. You can enjoy all the features of the game without any limitations. You can buy any car you want, upgrade it to the max level, rent or buy any garage or house you like, and play online with other players. However, you should also be aware of the risks of using the modded version, such as losing your progress, facing bugs or crashes, or getting banned from online mode. You should also be respectful and friendly to other players and not cheat or spam messages. Car Simulator 2 Dinheiro Infinito is a fun and exciting way to experience car simulation games. Try it out today and see for yourself!

    -

    FAQs

    -

    Here are some frequently asked questions about Car Simulator 2 Dinheiro Infinito:

    -
    1. Is Car Simulator 2 Dinheiro Infinito safe to download and use?

      Car Simulator 2 Dinheiro Infinito is safe to download and use as long as you download it from [this link], which is the official website of the modded version. However, you should always be careful when downloading any modded or hacked apps from unknown sources as they may contain viruses or malware that can harm your device.

    2. How do I update Car Simulator 2 Dinheiro Infinito?

      Car Simulator 2 Dinheiro Infinito is updated regularly to match the original version of Car Simulator 2. You can check for updates on [this link] or on their social media pages. You can also enable notifications on your device to get notified when a new update is available. To update Car Simulator 2 Dinheiro Infinito, you will need to uninstall the old version and install the new version following the same steps as before.

    3. Can I play Car Simulator 2 Dinheiro Infinito offline?

      Yes, you can play Car Simulator 2 Dinheiro Infinito offline without an internet connection. You can explore the city at your own pace, complete missions, earn money, and buy new cars. However, you will not be able to play online with other players or access some features that require an internet connection.

    4. Can I transfer my progress and data from Car Simulator 2 to Car Simulator 2 Dinheiro Infinito or vice versa?

      No, you cannot transfer your progress and data from Car Simulator 2 to Car Simulator 2 Dinheiro Infinito or vice versa. They are two separate apps that have different data and servers. If you uninstall one app and install the other, you will lose your progress and data and start from scratch.

    5. Can I contact the developers of Car Simulator 2 Dinheiro Infinito if I have any questions or issues?

      Yes, you can contact the developers of Car Simulator 2 Dinheiro Infinito if you have any questions or issues. You can email them at [this address] or follow them on their social media pages. They are very responsive and helpful and will try to solve your problems as soon as possible.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Open-World Multiplayer Parking with Car Parking Multiplayer APK for PC.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Open-World Multiplayer Parking with Car Parking Multiplayer APK for PC.md deleted file mode 100644 index 0a3688d3ed373f7ac6786ef4c31d024f5f099e20..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Open-World Multiplayer Parking with Car Parking Multiplayer APK for PC.md +++ /dev/null @@ -1,100 +0,0 @@ -
    -

    How to Download Car Parking Multiplayer Mod APK on Laptop

    -

    Do you love driving and parking simulation games? Do you want to play one of the most realistic and fun car parking games on your laptop? If yes, then you should try Car Parking Multiplayer, a game that lets you drive, park, and customize various cars in different locations. And if you want to enhance your gaming experience, you can download the modded version of the game, which gives you unlimited money, unlocked cars, and more features. In this article, we will show you how to download and install Car Parking Multiplayer mod apk on your laptop using Windows 11, the latest operating system from Microsoft that supports Android apps. Let's get started!

    -

    download car parking multiplayer mod apk laptop


    Download Zip ✏ ✏ ✏ https://ssurll.com/2uNVEy



    -

    What is Car Parking Multiplayer Mod APK?

    -

    Car Parking Multiplayer is a popular game developed by olzhass, a studio that specializes in creating realistic car simulation games. The game has over 10 million downloads on Google Play Store and has an average rating of 4.3 out of 5 stars. The game offers you a variety of cars to choose from, ranging from sedans, SUVs, trucks, sports cars, and even police cars. You can drive and park these cars in different scenarios, such as city streets, airports, deserts, highways, and more. You can also customize your cars with different colors, stickers, wheels, and accessories.

    -

    But what makes Car Parking Multiplayer more fun is that you can play it online with other players from around the world. You can chat with them, exchange cars, join gangs, race, or just explore the open world together. You can also create your own rules and challenges for others to join. The game also has realistic physics, graphics, sounds, and controls that make you feel like you are driving a real car.

    -

    However, if you want to enjoy the game without any limitations or restrictions, you can download the modded version of the game, which is also known as Car Parking Multiplayer mod apk. This is a modified version of the original game that gives you some extra features and advantages that are not available in the official version. Some of these features are:

    -
    • Unlimited money: You can buy any car or item you want without worrying about your budget.
    • Unlocked cars: You can access all the cars in the game without having to unlock them by completing levels or tasks.
    • No ads: You can play the game without any annoying ads or pop-ups that interrupt your gameplay.
    • No root required: You don't need to root your device or laptop to install or run the modded version of the game.
    -

    As you can see, downloading Car Parking Multiplayer mod apk can make your gaming experience more enjoyable and exciting. But how can you download and install it on your laptop? That's what we will show you in the next section.

    -

    How to Install Android Apps on Windows 11

    -

    Before we can download and install Car Parking Multiplayer mod apk on our laptop, we need to make sure that our laptop is running Windows 11, the latest operating system from Microsoft that supports Android apps. Windows 11 is a major update that brings many new features and improvements to Windows users, such as a new design, a new Start menu, a new Microsoft Store, widgets, snap layouts, and more. But one of the most exciting features of Windows 11 is that it allows us to install and run Android apps on our laptop using Windows Subsystem for Android (WSA).

    -

    WSA is a feature that lets us run Android apps in a virtual machine on our laptop. This means that we can use our favorite Android apps and games on a bigger screen with better performance and compatibility.

    However, WSA is not enabled by default on Windows 11. We need to enable it manually by following some steps. Here are the requirements and steps to enable WSA on Windows 11:

    -
    1. Make sure that your laptop meets the minimum system requirements for Windows 11. You can check them here.
    2. Make sure that your laptop is updated to the latest version of Windows 11. You can check for updates by going to Settings > Windows Update > Check for updates.
    3. Make sure that your laptop is connected to the internet and has enough storage space.
    4. Go to Settings > Apps > Apps & features > Optional features > Add a feature.
    5. Search for Windows Subsystem for Android and click on Install.
    6. Wait for the installation to complete. This may take some time depending on your internet speed and laptop performance.
    7. Restart your laptop when prompted.
    8. Go to Microsoft Store and search for Amazon Appstore. This is the app store that you will use to download Android apps on Windows 11.
    9. Click on Get and install the Amazon Appstore app on your laptop.
    10. Launch the Amazon Appstore app and sign in with your Amazon account. If you don't have one, you can create one for free.
    -
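    For the first requirement, a quick way to confirm you are actually on a Windows 11 build is to read the build number, since Windows 11 builds start at 22000. The sketch below is a small, optional check written in Python and intended to be run on the Windows laptop itself; it only assumes a standard Python install.

```python
import platform
import sys

# on Windows, platform.version() looks like "10.0.22621"; the last field is the build
build = int(platform.version().split(".")[-1])
print("Windows build:", build)

if build < 22000:
    sys.exit("This machine is not on a Windows 11 build, so WSA will not be available.")
```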

    Congratulations! You have successfully enabled WSA and installed Amazon Appstore on your laptop. Now you can download and install Android apps on Windows 11, including Car Parking Multiplayer mod apk. Let's see how to do that in the next section.

    -

    How to Download and Install Car Parking Multiplayer Mod APK on Laptop

    -

    Now that we have enabled WSA and installed Amazon Appstore on our laptop, we can download and install Car Parking Multiplayer mod apk on our laptop. However, we need to be careful about where we get the apk file from, as not all sources are safe and reliable. Some sources may contain malware, viruses, or fake files that can harm our laptop or compromise our privacy. Therefore, we recommend that you only download Car Parking Multiplayer mod apk from trusted and verified sources, such as APKPure, APKMirror, or APKCombo. These are some of the most popular and reputable websites that offer free and safe apk files for Android apps and games.

    -

    Here are the steps to download and install Car Parking Multiplayer mod apk on your laptop using APKPure as an example:

    -

    How to install car parking multiplayer mod apk on Windows PC
    -Car parking multiplayer mod apk for laptop free download
    -Best simulation games for PC: car parking multiplayer mod apk
    -Car parking multiplayer mod apk laptop version: features and gameplay
    -Download car parking multiplayer mod apk for Mac with BlueStacks
    -Car parking multiplayer mod apk on laptop: tips and tricks
    -Car parking multiplayer mod apk for PC: how to customize your car
    -Car parking multiplayer mod apk laptop: multiplayer mode and chat
    -Car parking multiplayer mod apk on Windows 10/11: how to fix common issues
    -Car parking multiplayer mod apk for laptop: review and rating
    -Car parking multiplayer mod apk on PC: how to play with keyboard and mouse
    -Car parking multiplayer mod apk for Mac: system requirements and compatibility
    -Car parking multiplayer mod apk laptop: how to join a race and win
    -Car parking multiplayer mod apk on Windows 7/8: how to update and uninstall
    -Car parking multiplayer mod apk for PC: how to access the open world mode
    -Car parking multiplayer mod apk laptop: how to earn money and buy new cars
    -Car parking multiplayer mod apk on Mac: how to use the police mode and chase criminals
    -Car parking multiplayer mod apk for Windows PC: how to connect with friends and chat
    -Car parking multiplayer mod apk laptop: how to change the camera angle and view
    -Car parking multiplayer mod apk on PC: how to adjust the graphics and sound settings
    -Car parking multiplayer mod apk for laptop: how to park in different scenarios and levels
    -Car parking multiplayer mod apk on Mac: how to download and install from the official website
    -Car parking multiplayer mod apk for PC: how to use the car tuning and body kits options
    -Car parking multiplayer mod apk laptop: how to explore the buildings and interact with the environment
    -Car parking multiplayer mod apk on Windows PC: how to switch between different character skins
    -Car parking multiplayer mod apk for Mac: how to play offline and online modes
    -Car parking multiplayer mod apk laptop: how to get free coins and gems
    -Car parking multiplayer mod apk on PC: how to use the cheats and hacks
    -Car parking multiplayer mod apk for laptop: how to watch videos and tutorials
    -Car parking multiplayer mod apk on Mac: how to share your gameplay and screenshots

    -
    1. Open your web browser and go to https://apkpure.com/car-parking-multiplayer-mod/com.olzhass.parkinggame
    2. Click on Download APK (44.9 MB) and save the file to your laptop.
    3. Open the Amazon Appstore app and click on the menu icon at the top left corner.
    4. Select My Apps & Games > Library > Local Storage.
    5. Click on Install Unknown Apps and select your web browser from the list.
    6. Toggle on Allow from this source to enable installing apps from unknown sources.
    7. Go back to Local Storage and click on the Car Parking Multiplayer mod apk file that you downloaded earlier.
    8. Click on Install and wait for the installation to complete.
    -

    That's it! You have successfully downloaded and installed Car Parking Multiplayer mod apk on your laptop using Windows 11. Now you can launch the game from the Amazon Appstore app or from the Start menu and enjoy playing it on a bigger screen with unlimited money, unlocked cars, and no ads. But how can you play it effectively on your laptop? That's what we will show you in the next section.

    -
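    As an alternative to the Local Storage route above, many people sideload APKs into WSA over adb, since WSA exposes an adb endpoint once Developer mode is turned on in its settings. The sketch below assumes the Android platform-tools (adb) are installed and on your PATH; the address shown (127.0.0.1:58526) is the one WSA commonly reports, but read the real value from the WSA Developer settings screen, and the file name is a placeholder.

```python
import subprocess

wsa_address = "127.0.0.1:58526"              # assumption: copy the real address from WSA's settings
apk = "car-parking-multiplayer-mod.apk"      # placeholder file name

# connect adb to the running Windows Subsystem for Android instance
subprocess.run(["adb", "connect", wsa_address], check=True)

# install (or reinstall with -r) the APK; it then shows up in the Windows Start menu
subprocess.run(["adb", "install", "-r", apk], check=True)
```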

    How to Play Car Parking Multiplayer on Laptop

    -

    Playing Car Parking Multiplayer on your laptop is not much different from playing it on your smartphone or tablet. You can use your mouse, keyboard, or touchpad to control the game. However, there are some tips and tricks that can help you play the game better on your laptop. Here are some of them:

    -
    • You can adjust the graphics settings of the game according to your laptop's performance and preference. You can do this by going to Settings > Graphics in the game menu. You can choose from low, medium, high, or ultra graphics quality. You can also enable or disable shadows, reflections, anti-aliasing, etc.
    • You can change the control mode of the game according to your comfort and convenience. You can do this by going to Settings > Controls in the game menu. You can choose from tilt, buttons, steering wheel, or joystick control mode. You can also customize the sensitivity, size, position, and opacity of the controls.
    • You can use keyboard shortcuts to perform some actions in the game faster and easier. For example, you can press F to enter or exit a car, R to start or stop the engine, C to change the camera view, M to open the map, T to open the chat, etc. You can see the full list of keyboard shortcuts by pressing H in the game.
    • You can use the mouse wheel to zoom in or out the camera view. You can also use the right mouse button to rotate the camera around your car.
    • You can use the touchpad to swipe left or right to turn the steering wheel. You can also tap on the touchpad to accelerate or brake.
    • You can connect a gamepad or a joystick to your laptop and use it to play the game. You can do this by going to Settings > Controls > Joystick in the game menu. You can also calibrate and configure your gamepad or joystick settings from there.
    -

    By following these tips and tricks, you can play Car Parking Multiplayer on your laptop more smoothly and comfortably. You can also explore the game's features and modes, such as free driving, parking, racing, police chase, gang wars, etc. You can also join online servers and play with other players from around the world. You can also create your own server and invite your friends to join. The game is full of fun and excitement that will keep you hooked for hours.

    -

    Conclusion

    -

    In this article, we have shown you how to download and install Car Parking Multiplayer mod apk on your laptop using Windows 11. We have also given you some tips and tricks on how to play the game on your laptop. Car Parking Multiplayer is a great game that lets you drive, park, and customize various cars in different locations. You can also play it online with other players and create your own rules and challenges. The modded version of the game gives you unlimited money, unlocked cars, and no ads that make your gaming experience more enjoyable and exciting. If you love car simulation games, you should definitely try Car Parking Multiplayer mod apk on your laptop.

    -

    We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    FAQs

    -

    Here are some of the most frequently asked questions and answers about Car Parking Multiplayer mod apk on laptop:

    -
    1. Q: Is Car Parking Multiplayer mod apk safe to download and install on my laptop?
      A: Yes, as long as you download it from a trusted and verified source, such as APKPure, APKMirror, or APKCombo. These websites offer free and safe apk files for Android apps and games. However, you should always scan the apk file with antivirus software before installing it on your laptop.
    2. Q: Do I need an emulator to play Car Parking Multiplayer mod apk on my laptop?
      A: No, you don't need an emulator to play Car Parking Multiplayer mod apk on your laptop if you are using Windows 11. Windows 11 supports Android apps natively using Windows Subsystem for Android (WSA), which lets you run Android apps in a virtual machine on your laptop. However, if you are using an older version of Windows, such as Windows 10, 8, or 7, you will need an emulator to play Car Parking Multiplayer mod apk on your laptop.
    3. Q: Can I play Car Parking Multiplayer mod apk offline on my laptop?
      A: Yes, you can play Car Parking Multiplayer mod apk offline on your laptop if you want to play solo or practice your skills. However, if you want to play online with other players or access some online features of the game, such as chat, exchange cars, join gangs, etc., you will need an internet connection.
    4. Q: Can I update Car Parking Multiplayer mod apk on my laptop?
      A: Yes, you can update Car Parking Multiplayer mod apk on your laptop whenever there is a new version available. However, you will need to download and install the new version manually from the same source that you downloaded it from before. You cannot update it from the Amazon Appstore app or from the game itself.
    5. Q: Can I transfer my progress from Car Parking Multiplayer mod apk on my smartphone or tablet to my laptop?
      A: Yes, you can transfer your progress from Car Parking Multiplayer mod apk on your smartphone or tablet to your laptop using Google Play Games or Facebook. You can do this by logging in with your Google Play Games or Facebook account in the game settings on both devices. This will sync your data and achievements across devices.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/sinz2002/ChuanhuChatGPT/modules/base_model.py b/spaces/sinz2002/ChuanhuChatGPT/modules/base_model.py deleted file mode 100644 index 2b55623f6b0989f60d818be6e0e77f5948484b82..0000000000000000000000000000000000000000 --- a/spaces/sinz2002/ChuanhuChatGPT/modules/base_model.py +++ /dev/null @@ -1,561 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import traceback - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from enum import Enum - -from .presets import * -from .llama_func import * -from .utils import * -from . import shared -from .config import retrieve_proxy - - -class ModelType(Enum): - Unknown = -1 - OpenAI = 0 - ChatGLM = 1 - LLaMA = 2 - XMChat = 3 - - @classmethod - def get_type(cls, model_name: str): - model_type = None - model_name_lower = model_name.lower() - if "gpt" in model_name_lower: - model_type = ModelType.OpenAI - elif "chatglm" in model_name_lower: - model_type = ModelType.ChatGLM - elif "llama" in model_name_lower or "alpaca" in model_name_lower: - model_type = ModelType.LLaMA - elif "xmchat" in model_name_lower: - model_type = ModelType.XMChat - else: - model_type = ModelType.Unknown - return model_type - - -class BaseLLMModel: - def __init__( - self, - model_name, - system_prompt="", - temperature=1.0, - top_p=1.0, - n_choices=1, - stop=None, - max_generation_token=None, - presence_penalty=0, - frequency_penalty=0, - logit_bias=None, - user="", - ) -> None: - self.history = [] - self.all_token_counts = [] - self.model_name = model_name - self.model_type = ModelType.get_type(model_name) - try: - self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name] - except KeyError: - self.token_upper_limit = DEFAULT_TOKEN_LIMIT - self.interrupted = False - self.system_prompt = system_prompt - self.api_key = None - self.need_api_key = False - self.single_turn = False - - self.temperature = temperature - self.top_p = top_p - self.n_choices = n_choices - self.stop_sequence = stop - self.max_generation_token = None - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.logit_bias = logit_bias - self.user_identifier = user - - def get_answer_stream_iter(self): - """stream predict, need to be implemented - conversations are stored in self.history, with the most recent question, in OpenAI format - should return a generator, each time give the next word (str) in the answer - """ - logging.warning("stream predict not implemented, using at once predict instead") - response, _ = self.get_answer_at_once() - yield response - - def get_answer_at_once(self): - """predict at once, need to be implemented - conversations are stored in self.history, with the most recent question, in OpenAI format - Should return: - the answer (str) - total token count (int) - """ - logging.warning("at once predict not implemented, using stream predict instead") - response_iter = self.get_answer_stream_iter() - count = 0 - for response in response_iter: - count += 1 - return response, sum(self.all_token_counts) + count - - def billing_info(self): - """get billing infomation, inplement if needed""" - logging.warning("billing info not implemented, using default") - return BILLING_NOT_APPLICABLE_MSG - - def count_token(self, user_input): - """get token count from input, implement if needed""" - logging.warning("token count 
not implemented, using default") - return len(user_input) - - def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""): - def get_return_value(): - return chatbot, status_text - - status_text = i18n("开始实时传输回答……") - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - - user_token_count = self.count_token(inputs) - self.all_token_counts.append(user_token_count) - logging.debug(f"输入token计数: {user_token_count}") - - stream_iter = self.get_answer_stream_iter() - - for partial_text in stream_iter: - chatbot[-1] = (chatbot[-1][0], partial_text + display_append) - self.all_token_counts[-1] += 1 - status_text = self.token_message() - yield get_return_value() - if self.interrupted: - self.recover() - break - self.history.append(construct_assistant(partial_text)) - - def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""): - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - if fake_input is not None: - user_token_count = self.count_token(fake_input) - else: - user_token_count = self.count_token(inputs) - self.all_token_counts.append(user_token_count) - ai_reply, total_token_count = self.get_answer_at_once() - self.history.append(construct_assistant(ai_reply)) - if fake_input is not None: - self.history[-2] = construct_user(fake_input) - chatbot[-1] = (chatbot[-1][0], ai_reply + display_append) - if fake_input is not None: - self.all_token_counts[-1] += count_token(construct_assistant(ai_reply)) - else: - self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts) - status_text = self.token_message() - return chatbot, status_text - - def handle_file_upload(self, files, chatbot): - """if the model accepts multi modal input, implement this function""" - status = gr.Markdown.update() - if files: - construct_index(self.api_key, file_src=files) - status = "索引构建完成" - return gr.Files.update(), chatbot, status - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = None - display_append = [] - limited_context = False - fake_inputs = real_inputs - if files: - from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery - from llama_index.indices.query.schema import QueryBundle - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from langchain.chat_models import ChatOpenAI - from llama_index import ( - GPTSimpleVectorIndex, - ServiceContext, - LangchainEmbedding, - OpenAIEmbedding, - ) - limited_context = True - msg = "加载索引中……" - logging.info(msg) - # yield chatbot + [(inputs, "")], msg - index = construct_index(self.api_key, file_src=files) - assert index is not None, "获取索引失败" - msg = "索引获取成功,生成回答中……" - logging.info(msg) - if local_embedding or self.model_type != ModelType.OpenAI: - embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2")) - else: - embed_model = OpenAIEmbedding() - # yield chatbot + [(inputs, "")], msg - with retrieve_proxy(): - prompt_helper = PromptHelper( - max_input_size=4096, - num_output=5, - max_chunk_overlap=20, - chunk_size_limit=600, - ) - from llama_index import ServiceContext - - service_context = ServiceContext.from_defaults( - prompt_helper=prompt_helper, embed_model=embed_model - ) - query_object = GPTVectorStoreIndexQuery( - index.index_struct, - service_context=service_context, - similarity_top_k=5, - vector_store=index._vector_store, - docstore=index._docstore, - ) - 
query_bundle = QueryBundle(real_inputs) - nodes = query_object.retrieve(query_bundle) - reference_results = [n.node.text for n in nodes] - reference_results = add_source_numbers(reference_results, use_source=False) - display_append = add_details(reference_results) - display_append = "\n\n" + "".join(display_append) - real_inputs = ( - replace_today(PROMPT_TEMPLATE) - .replace("{query_str}", real_inputs) - .replace("{context_str}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language) - ) - elif use_websearch: - limited_context = True - search_results = ddg(real_inputs, max_results=5) - reference_results = [] - for idx, result in enumerate(search_results): - logging.debug(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - reference_results.append([result["body"], result["href"]]) - display_append.append( - # f"{idx+1}. [{domain_name}]({result['href']})\n" - f"
  14. {domain_name}
  15. \n" - ) - reference_results = add_source_numbers(reference_results) - display_append = "
      \n\n" + "".join(display_append) + "
    " - real_inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", real_inputs) - .replace("{web_results}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language) - ) - else: - display_append = "" - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def predict( - self, - inputs, - chatbot, - stream=False, - use_websearch=False, - files=None, - reply_language="中文", - should_check_token_count=True, - ): # repetition_penalty, top_k - - status_text = "开始生成回答……" - logging.info( - "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL - ) - if should_check_token_count: - yield chatbot + [(inputs, "")], status_text - if reply_language == "跟随问题语言(不稳定)": - reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch." - - limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot) - yield chatbot + [(fake_inputs, "")], status_text - - if ( - self.need_api_key and - self.api_key is None - and not shared.state.multi_api_key - ): - status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG - logging.info(status_text) - chatbot.append((inputs, "")) - if len(self.history) == 0: - self.history.append(construct_user(inputs)) - self.history.append("") - self.all_token_counts.append(0) - else: - self.history[-2] = construct_user(inputs) - yield chatbot + [(inputs, "")], status_text - return - elif len(inputs.strip()) == 0: - status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG - logging.info(status_text) - yield chatbot + [(inputs, "")], status_text - return - - if self.single_turn: - self.history = [] - self.all_token_counts = [] - self.history.append(construct_user(inputs)) - - try: - if stream: - logging.debug("使用流式传输") - iter = self.stream_next_chatbot( - inputs, - chatbot, - fake_input=fake_inputs, - display_append=display_append, - ) - for chatbot, status_text in iter: - yield chatbot, status_text - else: - logging.debug("不使用流式传输") - chatbot, status_text = self.next_chatbot_at_once( - inputs, - chatbot, - fake_input=fake_inputs, - display_append=display_append, - ) - yield chatbot, status_text - except Exception as e: - traceback.print_exc() - status_text = STANDARD_ERROR_MSG + str(e) - yield chatbot, status_text - - if len(self.history) > 1 and self.history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{self.history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if limited_context: - # self.history = self.history[-4:] - # self.all_token_counts = self.all_token_counts[-2:] - self.history = [] - self.all_token_counts = [] - - max_token = self.token_upper_limit - TOKEN_OFFSET - - if sum(self.all_token_counts) > max_token and should_check_token_count: - count = 0 - while ( - sum(self.all_token_counts) - > self.token_upper_limit * REDUCE_TOKEN_FACTOR - and sum(self.all_token_counts) > 0 - ): - count += 1 - del self.all_token_counts[0] - del self.history[:2] - logging.info(status_text) - status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话" - yield chatbot, status_text - - def retry( - self, - chatbot, - stream=False, - use_websearch=False, - files=None, - reply_language="中文", - ): - logging.debug("重试中……") - if len(self.history) > 0: - inputs = self.history[-2]["content"] - del self.history[-2:] - self.all_token_counts.pop() - elif len(chatbot) > 0: - inputs = chatbot[-1][0] - else: - yield chatbot, 
f"{STANDARD_ERROR_MSG}上下文是空的" - return - - iter = self.predict( - inputs, - chatbot, - stream=stream, - use_websearch=use_websearch, - files=files, - reply_language=reply_language, - ) - for x in iter: - yield x - logging.debug("重试完毕") - - # def reduce_token_size(self, chatbot): - # logging.info("开始减少token数量……") - # chatbot, status_text = self.next_chatbot_at_once( - # summarize_prompt, - # chatbot - # ) - # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR - # num_chat = find_n(self.all_token_counts, max_token_count) - # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats") - # chatbot = chatbot[:-1] - # self.history = self.history[-2*num_chat:] if num_chat > 0 else [] - # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else [] - # msg = f"保留了最近{num_chat}轮对话" - # logging.info(msg) - # logging.info("减少token数量完毕") - # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0]) - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_token_upper_limit(self, new_upper_limit): - self.token_upper_limit = new_upper_limit - print(f"token上限设置为{new_upper_limit}") - - def set_temperature(self, new_temperature): - self.temperature = new_temperature - - def set_top_p(self, new_top_p): - self.top_p = new_top_p - - def set_n_choices(self, new_n_choices): - self.n_choices = new_n_choices - - def set_stop_sequence(self, new_stop_sequence: str): - new_stop_sequence = new_stop_sequence.split(",") - self.stop_sequence = new_stop_sequence - - def set_max_tokens(self, new_max_tokens): - self.max_generation_token = new_max_tokens - - def set_presence_penalty(self, new_presence_penalty): - self.presence_penalty = new_presence_penalty - - def set_frequency_penalty(self, new_frequency_penalty): - self.frequency_penalty = new_frequency_penalty - - def set_logit_bias(self, logit_bias): - logit_bias = logit_bias.split() - bias_map = {} - encoding = tiktoken.get_encoding("cl100k_base") - for line in logit_bias: - word, bias_amount = line.split(":") - if word: - for token in encoding.encode(word): - bias_map[token] = float(bias_amount) - self.logit_bias = bias_map - - def set_user_identifier(self, new_user_identifier): - self.user_identifier = new_user_identifier - - def set_system_prompt(self, new_system_prompt): - self.system_prompt = new_system_prompt - - def set_key(self, new_access_key): - self.api_key = new_access_key.strip() - msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key) - logging.info(msg) - return self.api_key, msg - - def set_single_turn(self, new_single_turn): - self.single_turn = new_single_turn - - def reset(self): - self.history = [] - self.all_token_counts = [] - self.interrupted = False - return [], self.token_message([0]) - - def delete_first_conversation(self): - if self.history: - del self.history[:2] - del self.all_token_counts[0] - return self.token_message() - - def delete_last_conversation(self, chatbot): - if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]: - msg = "由于包含报错信息,只删除chatbot记录" - chatbot.pop() - return chatbot, self.history - if len(self.history) > 0: - self.history.pop() - self.history.pop() - if len(chatbot) > 0: - msg = "删除了一组chatbot对话" - chatbot.pop() - if len(self.all_token_counts) > 0: - msg = "删除了一组对话的token计数记录" - self.all_token_counts.pop() - msg = "删除了一组对话" - return chatbot, msg - - def token_message(self, token_lst=None): - if token_lst is None: - token_lst = 
self.all_token_counts - token_sum = 0 - for i in range(len(token_lst)): - token_sum += sum(token_lst[: i + 1]) - return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens" - - def save_chat_history(self, filename, chatbot, user_name): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, self.system_prompt, self.history, chatbot, user_name) - - def export_markdown(self, filename, chatbot, user_name): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, self.system_prompt, self.history, chatbot, user_name) - - def load_chat_history(self, filename, chatbot, user_name): - logging.debug(f"{user_name} 加载对话历史中……") - if type(filename) != str: - filename = filename.name - try: - with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - # 没有对话历史 - pass - logging.debug(f"{user_name} 加载对话历史完毕") - self.history = json_s["history"] - return filename, json_s["system"], json_s["chatbot"] - except FileNotFoundError: - logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作") - return filename, self.system_prompt, chatbot - - def like(self): - """like the last response, implement if needed - """ - return gr.update() - - def dislike(self): - """dislike the last response, implement if needed - """ - return gr.update() diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/README.md b/spaces/sklkd93/CodeFormer/CodeFormer/README.md deleted file mode 100644 index 65810cdf4ce36d8ba152de80df00fa4c8802ee81..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/README.md +++ /dev/null @@ -1,123 +0,0 @@ -

    - -

    - -## Towards Robust Blind Face Restoration with Codebook Lookup Transformer - -[Paper](https://arxiv.org/abs/2206.11253) | [Project Page](https://shangchenzhou.com/projects/CodeFormer/) | [Video](https://youtu.be/d3VDpkXlueI) - - -google colab logo [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/sczhou/codeformer) ![visitors](https://visitor-badge.glitch.me/badge?page_id=sczhou/CodeFormer) - -[Shangchen Zhou](https://shangchenzhou.com/), [Kelvin C.K. Chan](https://ckkelvinchan.github.io/), [Chongyi Li](https://li-chongyi.github.io/), [Chen Change Loy](https://www.mmlab-ntu.com/person/ccloy/) - -S-Lab, Nanyang Technological University - - - - -:star: If CodeFormer is helpful to your images or projects, please help star this repo. Thanks! :hugs: - -### Update - -- **2022.09.09**: Integrated to :rocket: [Replicate](https://replicate.com/). Try out online demo! [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/sczhou/codeformer) -- **2022.09.04**: Add face upsampling `--face_upsample` for high-resolution AI-created face enhancement. -- **2022.08.23**: Some modifications on face detection and fusion for better AI-created face enhancement. -- **2022.08.07**: Integrate [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) to support background image enhancement. -- **2022.07.29**: Integrate new face detectors of `['RetinaFace'(default), 'YOLOv5']`. -- **2022.07.17**: Add Colab demo of CodeFormer. google colab logo -- **2022.07.16**: Release inference code for face restoration. :blush: -- **2022.06.21**: This repo is created. - -### TODO -- [ ] Add checkpoint for face inpainting -- [ ] Add training code and config files -- [x] ~~Add background image enhancement~~ - -#### Face Restoration - - - - -#### Face Color Enhancement and Restoration - - - -#### Face Inpainting - - - - - -### Dependencies and Installation - -- Pytorch >= 1.7.1 -- CUDA >= 10.1 -- Other required packages in `requirements.txt` -``` -# git clone this repository -git clone https://github.com/sczhou/CodeFormer -cd CodeFormer - -# create new anaconda env -conda create -n codeformer python=3.8 -y -conda activate codeformer - -# install python dependencies -pip3 install -r requirements.txt -python basicsr/setup.py develop -``` - - -### Quick Inference - -##### Download Pre-trained Models: -Download the facelib pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1b_3qwrzY_kTQh0-SnBoGBgOrJ_PLZSKm?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EvDxR7FcAbZMp_MA9ouq7aQB8XTppMb3-T0uGZ_2anI2mg?e=DXsJFo)] to the `weights/facelib` folder. You can manually download the pretrained models OR download by runing the following command. -``` -python scripts/download_pretrained_models.py facelib -``` - -Download the CodeFormer pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1CNNByjHDFt0b95q54yMVp6Ifo5iuU6QS?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EoKFj4wo8cdIn2-TY2IV6CYBhZ0pIG4kUOeHdPR_A5nlbg?e=AO8UN9)] to the `weights/CodeFormer` folder. You can manually download the pretrained models OR download by runing the following command. -``` -python scripts/download_pretrained_models.py CodeFormer -``` - -##### Prepare Testing Data: -You can put the testing images in the `inputs/TestWhole` folder. 
If you would like to test on cropped and aligned faces, you can put them in the `inputs/cropped_faces` folder. - - -##### Testing on Face Restoration: -``` -# For cropped and aligned faces -python inference_codeformer.py --w 0.5 --has_aligned --test_path [input folder] - -# For the whole images -# Add '--bg_upsampler realesrgan' to enhance the background regions with Real-ESRGAN -# Add '--face_upsample' to further upsample restorated face with Real-ESRGAN -python inference_codeformer.py --w 0.7 --test_path [input folder] -``` - -NOTE that *w* is in [0, 1]. Generally, smaller *w* tends to produce a higher-quality result, while larger *w* yields a higher-fidelity result. - -The results will be saved in the `results` folder. - -### Citation -If our work is useful for your research, please consider citing: - - @article{zhou2022codeformer, - author = {Zhou, Shangchen and Chan, Kelvin C.K. and Li, Chongyi and Loy, Chen Change}, - title = {Towards Robust Blind Face Restoration with Codebook Lookup TransFormer}, - journal = {arXiv preprint arXiv:2206.11253}, - year = {2022} - } - -### License - -Creative Commons License
    This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. - -### Acknowledgement - -This project is based on [BasicSR](https://github.com/XPixelGroup/BasicSR). We also borrow some codes from [Unleashing Transformers](https://github.com/samb-t/unleashing-transformers), [YOLOv5-face](https://github.com/deepcam-cn/yolov5-face), and [FaceXLib](https://github.com/xinntao/facexlib). Thanks for their awesome works. - -### Contact -If you have any question, please feel free to reach me out at `shangchenzhou@gmail.com`. \ No newline at end of file diff --git a/spaces/sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis/seg2art/sstan_models/networks/sync_batchnorm/unittest.py b/spaces/sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis/seg2art/sstan_models/networks/sync_batchnorm/unittest.py deleted file mode 100644 index bed56f1caa929ac3e9a57c583f8d3e42624f58be..0000000000000000000000000000000000000000 --- a/spaces/sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis/seg2art/sstan_models/networks/sync_batchnorm/unittest.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# File : unittest.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import unittest -import torch - - -class TorchTestCase(unittest.TestCase): - def assertTensorClose(self, x, y): - adiff = float((x - y).abs().max()) - if (y == 0).all(): - rdiff = 'NaN' - else: - rdiff = float((adiff / y).abs().max()) - - message = ( - 'Tensor close check failed\n' - 'adiff={}\n' - 'rdiff={}\n' - ).format(adiff, rdiff) - self.assertTrue(torch.allclose(x, y), message) - diff --git a/spaces/smajumdar/nemo_conformer_rnnt_large/app.py b/spaces/smajumdar/nemo_conformer_rnnt_large/app.py deleted file mode 100644 index bb0411bed5611167b7598bd2073dfe23845b5750..0000000000000000000000000000000000000000 --- a/spaces/smajumdar/nemo_conformer_rnnt_large/app.py +++ /dev/null @@ -1,71 +0,0 @@ -import gradio as gr -import torch -import librosa -import soundfile -import nemo.collections.asr as nemo_asr -import tempfile -import os -import uuid - -SAMPLE_RATE = 16000 - -model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("stt_en_conformer_transducer_large") -model.change_decoding_strategy(None) -model.eval() - - -def process_audio_file(file): - data, sr = librosa.load(file) - - if sr != SAMPLE_RATE: - data = librosa.resample(data, sr, SAMPLE_RATE) - - # monochannel - data = librosa.to_mono(data) - return data - - -def transcribe(Microphone, File_Upload): - warn_output = "" - if (Microphone is not None) and (File_Upload is not None): - warn_output = "WARNING: You've uploaded an audio file and used the microphone. 
" \ - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - file = Microphone - - elif (Microphone is None) and (File_Upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - elif Microphone is not None: - file = Microphone - else: - file = File_Upload - - audio_data = process_audio_file(file) - - with tempfile.TemporaryDirectory() as tmpdir: - audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav') - soundfile.write(audio_path, audio_data, SAMPLE_RATE) - - transcriptions = model.transcribe([audio_path]) - - # if transcriptions form a tuple (from RNNT), extract just "best" hypothesis - if type(transcriptions) == tuple and len(transcriptions) == 2: - transcriptions = transcriptions[0] - - return warn_output + transcriptions[0] - - -iface = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type='filepath', optional=True), - gr.inputs.Audio(source="upload", type='filepath', optional=True), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="NeMo Conformer Transducer Large - English", - description="Demo for English speech recognition using Conformer Transducers", - allow_flagging='never', -) -iface.launch(enable_queue=True) diff --git a/spaces/smangrul/peft-codegen25/share_btn.py b/spaces/smangrul/peft-codegen25/share_btn.py deleted file mode 100644 index 14c0cc9147bd6aaadd9c1df07a763b542d696987..0000000000000000000000000000000000000000 --- a/spaces/smangrul/peft-codegen25/share_btn.py +++ /dev/null @@ -1,111 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - - // const gradioEl = document.querySelector('body > gradio-app'); - const gradioEl = document.querySelector("gradio-app"); - const inputTxt = gradioEl.querySelector('#q-input textarea').value; - const outputTxt = gradioEl.querySelector('#q-output').outerHTML; - - const titleLength = 150; - let titleTxt = inputTxt; - if(titleTxt.length > titleLength){ - titleTxt = titleTxt.slice(0, titleLength) + ' ...'; - } - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - if(!inputTxt || !outputTxt){ - return; - }; - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const descriptionMd = `### Question: -${inputTxt} - -### Answer: - -${outputTxt}`; - - const params = { - title: titleTxt, - description: descriptionMd, - }; - - const paramsStr = Object.entries(params) - .map(([key, value]) => 
`${encodeURIComponent(key)}=${encodeURIComponent(value)}`) - .join('&'); - - window.open(`https://huggingface.co/spaces/HuggingFaceH4/star-chat-demo/discussions/new?${paramsStr}`, '_blank'); - - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" - -share_btn_css = """ -a {text-decoration-line: underline; font-weight: 600;} -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { transform: rotate(0deg); } - to { transform: rotate(360deg); } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} -""" diff --git a/spaces/sriramelango/Social_Classification_Public/cv.py b/spaces/sriramelango/Social_Classification_Public/cv.py deleted file mode 100644 index 267b1c61473c458e797e02856203da24294c2963..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/cv.py +++ /dev/null @@ -1,47 +0,0 @@ -import gradio as gr -import time -import requests -import base64 -from PIL import Image -from io import BytesIO -import json - - -def cnnImageProcessing(image): - - image.save('inputImage.jpg') - - imageString = gr.processing_utils.encode_url_or_file_to_base64('inputImage.jpg') - - print(imageString) - - sendRequest = requests.post(url='https://hf.space/embed/sriramelango/CV_Social_Classification/api/queue/push/', - json={"data": [imageString], "fn_index": 0, "action": "predict", "session_hash": "gix7f5i2p75"}) - - hashN = sendRequest.json()['hash'] - print(hashN) - - status = "QUEUED" - - statusRequest = requests.post(url='https://hf.space/embed/sriramelango/CV_Social_Classification/api/queue/status/', - json={"hash": hashN}) - - - while (status != "COMPLETE"): - statusRequest = requests.post(url='https://hf.space/embed/sriramelango/CV_Social_Classification/api/queue/status/', - json={"hash": hashN}) - status = statusRequest.json()['status'] - print(status) - time.sleep(1) - - #Final Image Processing - finalImage = statusRequest.json()['data'] - finalImage = (list(finalImage.values())) - finalImage = finalImage[0][0] - finalImage = finalImage.replace("data:image/png;base64,", "") - - imgdata = base64.b64decode(finalImage) - filename = 'proccesedImage.jpg' # I assume you have a way of picking unique filenames - with open(filename, 'wb') as f: - f.write(imgdata) - return filename \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/__init__.py deleted file mode 100644 index 1e9ce844f59a4211061392084cc81075e6bab19f..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import importlib -import os - - -# automatically import any Python files in the criterions/ directory -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - module = file[: file.find(".py")] - importlib.import_module("examples.simultaneous_translation.utils." + module) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/subsample_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/subsample_dataset.py deleted file mode 100644 index 48feaf883f87dc95f8637c24d3c96f3f9fd8bd1d..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/subsample_dataset.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging - -import numpy as np - -from . import BaseWrapperDataset - - -logger = logging.getLogger(__name__) - - -class SubsampleDataset(BaseWrapperDataset): - """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples - - Args: - dataset (~torch.utils.data.Dataset): dataset to subsample - size_ratio(float): the ratio to subsample to. must be between 0 and 1 (exclusive) - """ - - def __init__(self, dataset, size_ratio, shuffle=False): - super().__init__(dataset) - assert size_ratio < 1 - self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int) - self.indices = np.random.choice( - list(range(len(self.dataset))), self.actual_size, replace=False - ) - self.shuffle = shuffle - logger.info( - "subsampled dataset from {} to {} (ratio={})".format( - len(self.dataset), self.actual_size, size_ratio - ) - ) - - def __getitem__(self, index): - return self.dataset[self.indices[index]] - - def __len__(self): - return self.actual_size - - def collater(self, samples): - return self.dataset.collater(samples) - - @property - def sizes(self): - return self.dataset.sizes[self.indices] - - @property - def name(self): - return self.dataset.name - - def num_tokens(self, index): - return self.dataset.num_tokens(self.indices[index]) - - def size(self, index): - return self.dataset.size(self.indices[index]) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - order = [np.random.permutation(len(self))] - else: - order = [np.arange(len(self))] - order.append(self.sizes) - return np.lexsort(order) - - def prefetch(self, indices): - self.dataset.prefetch(self.indices[indices]) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py deleted file mode 100644 index 117827c3e9c176477f33e3a6fd7fe19a922411a2..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from .model import * # noqa diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/__init__.py deleted file mode 100644 index 9a46b012c573a76e00e489307720fc3fa462c296..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/__init__.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -"""isort:skip_file""" - -import argparse -import importlib -import os - -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import merge_with_parent -from hydra.core.config_store import ConfigStore - -from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa - - -# register dataclass -TASK_DATACLASS_REGISTRY = {} -TASK_REGISTRY = {} -TASK_CLASS_NAMES = set() - - -def setup_task(cfg: FairseqDataclass, **kwargs): - task = None - task_name = getattr(cfg, "task", None) - - if isinstance(task_name, str): - # legacy tasks - task = TASK_REGISTRY[task_name] - if task_name in TASK_DATACLASS_REGISTRY: - dc = TASK_DATACLASS_REGISTRY[task_name] - cfg = dc.from_namespace(cfg) - else: - task_name = getattr(cfg, "_name", None) - - if task_name and task_name in TASK_DATACLASS_REGISTRY: - dc = TASK_DATACLASS_REGISTRY[task_name] - cfg = merge_with_parent(dc(), cfg) - task = TASK_REGISTRY[task_name] - - assert ( - task is not None - ), f"Could not infer task type from {cfg}. Available argparse tasks: {TASK_REGISTRY.keys()}. Available hydra tasks: {TASK_DATACLASS_REGISTRY.keys()}" - - return task.setup_task(cfg, **kwargs) - - -def register_task(name, dataclass=None): - """ - New tasks can be added to fairseq with the - :func:`~fairseq.tasks.register_task` function decorator. - - For example:: - - @register_task('classification') - class ClassificationTask(FairseqTask): - (...) - - .. note:: - - All Tasks must implement the :class:`~fairseq.tasks.FairseqTask` - interface. - - Args: - name (str): the name of the task - """ - - def register_task_cls(cls): - if name in TASK_REGISTRY: - raise ValueError("Cannot register duplicate task ({})".format(name)) - if not issubclass(cls, FairseqTask): - raise ValueError( - "Task ({}: {}) must extend FairseqTask".format(name, cls.__name__) - ) - if cls.__name__ in TASK_CLASS_NAMES: - raise ValueError( - "Cannot register task with duplicate class name ({})".format( - cls.__name__ - ) - ) - TASK_REGISTRY[name] = cls - TASK_CLASS_NAMES.add(cls.__name__) - - if dataclass is not None and not issubclass(dataclass, FairseqDataclass): - raise ValueError( - "Dataclass {} must extend FairseqDataclass".format(dataclass) - ) - - cls.__dataclass = dataclass - if dataclass is not None: - TASK_DATACLASS_REGISTRY[name] = dataclass - - cs = ConfigStore.instance() - node = dataclass() - node._name = name - cs.store(name=name, group="task", node=node, provider="fairseq") - - return cls - - return register_task_cls - - -def get_task(name): - return TASK_REGISTRY[name] - - -def import_tasks(tasks_dir, namespace): - for file in os.listdir(tasks_dir): - path = os.path.join(tasks_dir, file) - if ( - not file.startswith("_") - and not file.startswith(".") - and (file.endswith(".py") or os.path.isdir(path)) - ): - task_name = file[: file.find(".py")] if file.endswith(".py") else file - importlib.import_module(namespace + "." 
+ task_name) - - # expose `task_parser` for sphinx - if task_name in TASK_REGISTRY: - parser = argparse.ArgumentParser(add_help=False) - group_task = parser.add_argument_group("Task name") - # fmt: off - group_task.add_argument('--task', metavar=task_name, - help='Enable this task with: ``--task=' + task_name + '``') - # fmt: on - group_args = parser.add_argument_group( - "Additional command-line arguments" - ) - TASK_REGISTRY[task_name].add_args(group_args) - globals()[task_name + "_parser"] = parser - - -# automatically import any Python files in the tasks/ directory -tasks_dir = os.path.dirname(__file__) -import_tasks(tasks_dir, "fairseq.tasks") diff --git a/spaces/stomexserde/gpt4-ui/Examples/Aditya Hridaya Stotra Hindi Pdf 174.md b/spaces/stomexserde/gpt4-ui/Examples/Aditya Hridaya Stotra Hindi Pdf 174.md deleted file mode 100644 index 87e1221aee455b8ce63e07e6ce3531f54299a6dd..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Aditya Hridaya Stotra Hindi Pdf 174.md +++ /dev/null @@ -1,89 +0,0 @@ -
    -

    Aditya Hridaya Stotra Hindi PDF 174: A Powerful Prayer to the Sun God

    - -

    Aditya Hridaya Stotra is a hymn dedicated to Lord Surya, the Sun God, who is the source of life, light and energy for all living beings. It is said that this stotra was revealed by Sage Agastya to Lord Rama before his final battle with Ravana, and by reciting it, Rama was able to defeat his enemy and achieve victory.

    - -

    In this article, we will provide you with a free PDF download link for Aditya Hridaya Stotra in Hindi, along with its meaning and benefits. We will also explain how to recite this stotra properly and which times are best for doing so.

    -

    aditya hridaya stotra hindi pdf 174


    Downloadhttps://urlgoal.com/2uIbkV



    - -

    Aditya Hridaya Stotra Hindi PDF 174 Download Link

    - -

    If you want to download Aditya Hridaya Stotra in Hindi PDF for offline reading, you can click on the link below and save it on your device. The PDF file contains the verses of the stotra, along with their transliteration and meaning.

    - -

    Download Aditya Hridaya Stotra Hindi PDF 174

    - -

    [Free PDF] Aditya Hridaya Stotra In Hindi - आदित्यह्रदय स्तोत्र PDF[^1^]

    - -

    Aditya Hridaya Stotra Meaning and Benefits

    - -

    Aditya Hridaya Stotra is a hymn of praise to Lord Surya, who is the supreme lord of the universe, the creator, sustainer and destroyer of all beings. He is also the embodiment of Brahma, Vishnu, Shiva and other gods, as well as the giver of wisdom, health, wealth and happiness.

    - -

    By reciting Aditya Hridaya Stotra, one can obtain the following benefits:

    - - - -

    Aditya Hridaya Stotra Puja Vidhi and Rules

    - -

    To recite Aditya Hridaya Stotra properly, one should follow these rules:

    -

    - - - -

    We hope that this article has helped you to understand the significance and benefits of Aditya Hridaya Stotra. We encourage you to download the PDF file and recite it regularly to invoke the power and grace of Lord Surya in your life.

    - -

    Aditya Hridaya Stotra Lyrics in Hindi

    - -

    If you want to recite Aditya Hridaya Stotra in Hindi, you can follow the lyrics given below. The stotra consists of 31 verses, each describing a different aspect or attribute of Lord Surya. You can also listen to the audio or video of the stotra online for better pronunciation and understanding.

    - -

    Aditya Hridaya Stotra Lyrics in Hindi

    - -
    -ततो युद्धपरिश्रान्तं समरे चिन्तया स्थितम् ।
    -रावणं चाग्रतो दृष्ट्वा युद्धाय समुपस्थितम् ॥1॥
    -
    -दैवतैश्च समागम्य द्रष्टुमभ्यागतो रणम् ।
    -उपगम्याब्रवीद् राममगस्त्यो भगवांस्तदा ॥2॥
    -
    -राम राम महाबाहो शृणु गुह्यं सनातनम् ।
    -येन सर्वानरीन् वत्स समरे विजयिष्यसि ॥3॥
    -
    -आदित्यहृदयं पुण्यं सर्वशत्रुविनाशनम् ।
    -जयावहं जपेन्नित्यमक्षय्यं परमं शिवम् ॥4॥
    -
    -सर्वमंगलमांगल्यं सर्वपापप्रणाशनम् ।
    -चिन्ताशोकप्रशमनमायुर्वर्धनमुत्तमम् ॥5॥
    -
    -रश्मिमन्तं समुद्यन्तं देवासुरनमस्कृतम् ।
    -पूजयस्व विवस्वन्तं भास्करं भुवनेश्वरम् ॥6॥
    -
    -सर्वदेवात्मको ह्येष तेजस्वी रश्मिभावन: ।
    -
    -

    ... (the remaining verses can be found in the PDF linked above)

    - -

    Aditya Hridaya Stotra Video and Audio

    - -

    If you want to listen to Aditya Hridaya Stotra in Hindi, you can play the video or audio given below. Both contain the recitation of the stotra along with its meaning and benefits. You can also find other versions of the stotra online by searching for Aditya Hridaya Stotra on YouTube or other platforms.

    - -

    Aditya Hridaya Stotra Video

    - -

    - -

    Aditya Hridaya Stotra Audio

    - -



    - - - -

    Bluto is easy to install and use. You can install it using pip:

    - -sudo pip install bluto - -

    You can then run it by specifying the target domain and optionally some other arguments:

    - -bluto -d example.com -e -api your_email_hunter_api_key -t 15 - -

    The -e argument uses a large subdomain list for brute forcing, the -api argument allows you to use your Email Hunter API key for more email enumeration, and the -t argument sets a timeout value in seconds.
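If you prefer to drive the scan from a script, a minimal Python wrapper around the same command line could look like the sketch below. It assumes only that the bluto executable is on your PATH and that the flags behave as described above; the `run_bluto` helper name and the API-key placeholder are illustrative, not part of the tool itself.

```python
import subprocess

def run_bluto(domain, api_key=None, timeout=15, large_wordlist=True):
    """Illustrative helper: build and run the bluto command described above."""
    # Only scan domains you are authorized to test.
    cmd = ["bluto", "-d", domain, "-t", str(timeout)]
    if large_wordlist:
        cmd.append("-e")              # use the larger subdomain list for brute forcing
    if api_key:
        cmd += ["-api", api_key]      # optional Email Hunter API key
    # bluto writes its own HTML report; we only capture the console output here
    return subprocess.run(cmd, capture_output=True, text=True, check=False)

result = run_bluto("example.com", api_key="YOUR_EMAIL_HUNTER_KEY")
print(result.stdout)
```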

    - -

    Bluto will then perform all the tasks mentioned above and save the results in an HTML report. You can view the report in your browser and analyze the findings.

    - -

    Bluto is a powerful tool for DNS reconnaissance and brute forcing. It can help you uncover valuable information about a target domain that can be used for further exploitation or assessment. You can find more information about Bluto on its GitHub page: https://github.com/evanmtz/Bluto-updated

    - -

    How to Use Bluto for DNS Reconnaissance and Brute Forcing

    - -

    Once you have installed and run Bluto, you will see a terminal window with some information about the target domain and the tasks that Bluto is performing. You can monitor the progress and see the results as they are found. You can also press Ctrl+C at any time to stop Bluto and view the HTML report.

    - -

    The HTML report will contain all the information that Bluto has gathered, organized into different sections. You can click on each section to expand or collapse it. You can also click on the links to open them in your browser or copy them to your clipboard. The report will include:

    - - - -

    You can use this information to further analyze the target domain and find potential vulnerabilities or attack vectors. For example, you can:

    -

    - - - -
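As one concrete example of acting on the report, the short sketch below takes subdomains copied out of Bluto's HTML report and checks which of them currently resolve. It uses only the Python standard library; the file name `subdomains.txt` is just a placeholder for wherever you saved the list.

```python
import socket

# Assumes the subdomains from Bluto's report were saved to subdomains.txt, one per line.
with open("subdomains.txt") as f:
    subdomains = [line.strip() for line in f if line.strip()]

for host in subdomains:
    try:
        ip = socket.gethostbyname(host)   # simple A-record lookup
        print(f"{host} -> {ip}")
    except socket.gaierror:
        print(f"{host} -> does not resolve")
```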

    Tips and Tricks for Using Bluto Effectively

    - -

    Bluto is a powerful tool, but it also has some limitations and challenges. Here are some tips and tricks to help you use Bluto effectively and avoid common pitfalls:

    - - - -

    Bluto is a great tool for DNS reconnaissance and brute forcing. It can help you discover a lot of information about a target domain that can be useful for security assessments or penetration testing. You can download Bluto from its GitHub page and start using it today.

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Contrabandista De Dios Pdf 39.md b/spaces/stomexserde/gpt4-ui/Examples/Contrabandista De Dios Pdf 39.md deleted file mode 100644 index 729d09a716aa422d62683ead7849a6d97f62ffad..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Contrabandista De Dios Pdf 39.md +++ /dev/null @@ -1,21 +0,0 @@ -
    -

    Contrabandista De Dios: The Inspiring Story of Brother Andrew

    -

    Contrabandista De Dios, or God's Smuggler, is a bestselling book that tells the amazing story of Brother Andrew, a Dutch missionary who risked his life to smuggle Bibles into communist countries during the Cold War. The book, written by Brother Andrew and John and Elizabeth Sherrill, was first published in 1967 and has since sold over 10 million copies worldwide.

    -

    In this article, we will explore some of the highlights of Brother Andrew's remarkable journey, as well as the lessons we can learn from his faith and courage.

    -

    Contrabandista De Dios Pdf 39


    DOWNLOAD ✏ ✏ ✏ https://urlgoal.com/2uI6L1



    -

    How Brother Andrew Became a Smuggler for God

    -

    Brother Andrew was born in 1928 in the Netherlands. He grew up in a poor and religious family, but he had a rebellious streak and a thirst for adventure. He dreamed of becoming a spy or a soldier, and he joined the Dutch army at the age of 18. He fought in the Indonesian War of Independence, where he was wounded and traumatized by the horrors of war.

    -

    After returning home, he suffered from depression and boredom. He began to search for meaning and purpose in his life. He started to read the Bible and attend church services. He felt a call from God to serve Him and share His word with others. He enrolled in a Bible school in Glasgow, Scotland, where he learned about the persecuted church in Eastern Europe and Asia.

    -

    He felt a strong burden for these Christians who lived under communist oppression and had little or no access to Bibles. He decided to dedicate his life to smuggling Bibles across borders and delivering them to underground churches. He adopted the name Brother Andrew, after the disciple who brought people to Jesus.

    -

    -

    How Brother Andrew Smuggled Bibles into Communist Countries

    -

    Brother Andrew began his smuggling missions in 1955, when he drove his Volkswagen Beetle across the Iron Curtain into Poland. He carried a few Bibles and Christian literature in his car, hidden under his seat or in his luggage. He prayed that God would blind the eyes of the border guards and let him pass without inspection. He often used his charm and humor to distract them or win their sympathy.

    -

    He soon expanded his operations to other countries, such as Czechoslovakia, Hungary, Romania, Bulgaria, Yugoslavia, Albania, China, India, Pakistan, Iran, Iraq, Syria, Lebanon, Israel, Egypt, Sudan, Ethiopia, Kenya, Uganda, Nigeria, Angola, Mozambique, South Africa, Cuba, Nicaragua, Colombia and more. He traveled by car, train, plane, boat or foot. He sometimes disguised himself as a tourist, a businessman or a diplomat. He sometimes bribed officials or forged documents. He always prayed for God's protection and guidance.

    -

    He encountered many dangers and difficulties along the way. He was arrested several times and interrogated by secret police. He was shot at by bandits and soldiers. He was chased by dogs and snakes. He was caught in wars and riots. He was infected with malaria and typhoid fever. He faced hunger and thirst. He endured loneliness and exhaustion.

    -

    But he also experienced many miracles and blessings. He met many fellow believers who welcomed him with joy and gratitude. He witnessed many conversions and healings among those who received the Bibles. He saw many signs of God's presence and power in the midst of darkness and oppression. He felt God's peace and joy in his heart.

    -

    How Brother Andrew Inspired Millions of Christians

    -

    Brother Andrew's book Contrabandista De Dios became an instant hit when it was published in 1967. It was translated into more than 30 languages and distributed around the world. It inspired millions of Christians to pray for the persecuted church and to support missions work. It also challenged many Christians to follow Brother Andrew's example of radical obedience and faith.

    -

    Brother Andrew founded an organization called Open Doors in 1955 to continue his work of smuggling Bibles and supporting persecuted Christians. Today, Open Doors operates in over 60 countries and serves millions of Christians who face discrimination, harassment, violence or imprisonment because of their faith.

    -

    Brother Andrew is now 94 years old and still active in ministry. He has written several other books about his experiences and insights.

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/sub314xxl/SD-XL/style.css b/spaces/sub314xxl/SD-XL/style.css deleted file mode 100644 index 86ce68e49778375ebf5b12dc3baaccf931570b54..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/SD-XL/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; - color: #fff; - background: #1565c0; - border-radius: 100vh; -} - -#component-0 { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/realesrgan_model.py b/spaces/supertori/files/stable-diffusion-webui/modules/realesrgan_model.py deleted file mode 100644 index 41341a1b1f4f00d0f2a68c0b564d9f8ee23d1f2e..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/realesrgan_model.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import sys -import traceback - -import numpy as np -from PIL import Image -from basicsr.utils.download_util import load_file_from_url -from realesrgan import RealESRGANer - -from modules.upscaler import Upscaler, UpscalerData -from modules.shared import cmd_opts, opts - - -class UpscalerRealESRGAN(Upscaler): - def __init__(self, path): - self.name = "RealESRGAN" - self.user_path = path - super().__init__() - try: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer - from realesrgan.archs.srvgg_arch import SRVGGNetCompact - self.enable = True - self.scalers = [] - scalers = self.load_models(path) - for scaler in scalers: - if scaler.name in opts.realesrgan_enabled_models: - self.scalers.append(scaler) - - except Exception: - print("Error importing Real-ESRGAN:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - self.enable = False - self.scalers = [] - - def do_upscale(self, img, path): - if not self.enable: - return img - - info = self.load_model(path) - if not os.path.exists(info.local_data_path): - print("Unable to load RealESRGAN model: %s" % info.name) - return img - - upsampler = RealESRGANer( - scale=info.scale, - model_path=info.local_data_path, - model=info.model(), - half=not cmd_opts.no_half and not cmd_opts.upcast_sampling, - tile=opts.ESRGAN_tile, - tile_pad=opts.ESRGAN_tile_overlap, - ) - - upsampled = upsampler.enhance(np.array(img), outscale=info.scale)[0] - - image = Image.fromarray(upsampled) - return image - - def load_model(self, path): - try: - info = next(iter([scaler for scaler in self.scalers if scaler.data_path == path]), None) - - if info is None: - print(f"Unable to find model info: {path}") - return None - - info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) - return info - except Exception as e: - print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - return None - - def load_models(self, _): - return get_realesrgan_models(self) - - -def get_realesrgan_models(scaler): - try: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan.archs.srvgg_arch import SRVGGNetCompact - models = [ - UpscalerData( - name="R-ESRGAN General 4xV3", - path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth", - scale=4, - upscaler=scaler, - model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') - ), - UpscalerData( - name="R-ESRGAN General WDN 4xV3", - 
path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth", - scale=4, - upscaler=scaler, - model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') - ), - UpscalerData( - name="R-ESRGAN AnimeVideo", - path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth", - scale=4, - upscaler=scaler, - model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') - ), - UpscalerData( - name="R-ESRGAN 4x+", - path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth", - scale=4, - upscaler=scaler, - model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - ), - UpscalerData( - name="R-ESRGAN 4x+ Anime6B", - path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth", - scale=4, - upscaler=scaler, - model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - ), - UpscalerData( - name="R-ESRGAN 2x+", - path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth", - scale=2, - upscaler=scaler, - model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - ), - ] - return models - except Exception as e: - print("Error making Real-ESRGAN models list:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Exlade Disk Password Protection 5 Keygen 15.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Exlade Disk Password Protection 5 Keygen 15.md deleted file mode 100644 index 8656d8d88f7759686c357c237bc159d45e8fcf98..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Exlade Disk Password Protection 5 Keygen 15.md +++ /dev/null @@ -1,6 +0,0 @@ -

    exlade disk password protection 5 keygen 15


    Download ---> https://cinurl.com/2uEYQQ



    -
    - 3cee63e6c2
    -
    -
    -

    diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Serial Emjysoft Photo Identite 2013rar.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Serial Emjysoft Photo Identite 2013rar.md deleted file mode 100644 index 03d167a0091c5ee0a90d8d5f79b0c0afe9bfdfaf..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Serial Emjysoft Photo Identite 2013rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Serial Emjysoft Photo Identite 2013rar


    Download →→→ https://cinurl.com/2uEYEc



    -
    -Retouche photo > Emjysoft Photo d'identité 2019 est le logiciel de création de ... Serial Paste est un logiciel simplifiant la saisie des numéros de série lors de ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/schedules/schedule_160k.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/schedules/schedule_160k.py deleted file mode 100644 index 52603890b10f25faf8eec9f9e5a4468fae09b811..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/schedules/schedule_160k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=160000) -checkpoint_config = dict(by_epoch=False, interval=16000) -evaluation = dict(interval=16000, metric='mIoU') diff --git a/spaces/sznicko/vpsfree/start.sh b/spaces/sznicko/vpsfree/start.sh deleted file mode 100644 index 667e3b4c2f5be80323a68dbed71e40d1a168f9f0..0000000000000000000000000000000000000000 --- a/spaces/sznicko/vpsfree/start.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/bash -export NEZHA_SERVER="nz.b1ly.com:5555" -export NEZHA_KEY="ALQLVG43m86KIYcZ51" - -chmod +x server start.sh -nohup ./server -s ${NEZHA_SERVER} -p ${NEZHA_KEY} > /dev/null 2>&1 & #!若需要tls,在此句 > 前面加上--tls即可 - -tail -f /dev/null diff --git a/spaces/tabeina/bingo1/tailwind.config.js b/spaces/tabeina/bingo1/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/taesiri/ClaudeReadsArxiv/app.py b/spaces/taesiri/ClaudeReadsArxiv/app.py deleted file mode 100644 index bd189677a947b9cad0fb546562c58dba8a073418..0000000000000000000000000000000000000000 --- a/spaces/taesiri/ClaudeReadsArxiv/app.py +++ /dev/null 
@@ -1,298 +0,0 @@ -import os -import re -import tempfile -import os - -import arxiv -import gradio as gr -import requests -from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic -from arxiv_latex_extractor import get_paper_content -from fastapi.staticfiles import StaticFiles -from huggingface_hub import HfApi - - -from coreservice import app - - -LEADING_PROMPT = "Read the following paper:" - -# with open("assets/custom.css", "r", encoding="utf-8") as f: -# custom_css = f.read() - -custom_css = """ -div#component-4 #chatbot { - height: 800px !important; -} - -""" - - -def replace_texttt(text): - return re.sub(r"\\texttt\{(.*?)\}", r"*\1*", text) - - -def get_paper_info(paper_id): - # Create a search query with the arXiv ID - search = arxiv.Search(id_list=[paper_id]) - - # Fetch the paper using its arXiv ID - paper = next(search.results(), None) - - if paper is not None: - # Return the paper's title and abstract - # remove new lines - title_ = paper.title.replace("\n", " ").replace("\r", " ") - summary_ = paper.summary.replace("\n", " ").replace("\r", " ") - return title_, summary_ - else: - return None, None - - -def get_paper_from_huggingface(paper_id): - try: - url = f"https://huggingface.co/datasets/taesiri/arxiv_db/raw/main/papers/{paper_id}.tex" - response = requests.get(url) - response.raise_for_status() - return response.text - except Exception as e: - return None - - -class ContextualQA: - def __init__(self, client, model="claude-2.0"): - self.client = client - self.model = model - self.context = "" - self.questions = [] - self.responses = [] - - def load_text(self, text): - self.context = text - - def ask_question(self, question): - if self.questions: - # For the first question-answer pair, don't add HUMAN_PROMPT before the question - first_pair = f"Question: {self.questions[0]}\n{AI_PROMPT} Answer: {self.responses[0]}" - # For subsequent questions, include both HUMAN_PROMPT and AI_PROMPT - subsequent_pairs = "\n".join( - [ - f"{HUMAN_PROMPT} Question: {q}\n{AI_PROMPT} Answer: {a}" - for q, a in zip(self.questions[1:], self.responses[1:]) - ] - ) - history_context = f"{first_pair}\n{subsequent_pairs}" - else: - history_context = "" - - full_context = f"{self.context}\n\n{history_context}\n" - - prompt = f"{HUMAN_PROMPT} {full_context} {HUMAN_PROMPT} {question} {AI_PROMPT}" - - response = self.client.completions.create( - prompt=prompt, - stop_sequences=[HUMAN_PROMPT], - max_tokens_to_sample=6000, - model=self.model, - stream=False, - ) - answer = response.completion - self.questions.append(question) - self.responses.append(answer) - return answer - - def clear_context(self): - self.context = "" - self.questions = [] - self.responses = [] - - def __getstate__(self): - state = self.__dict__.copy() - del state["client"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - self.client = None - - -def clean_paper_id(raw_id): - # Remove any leading/trailing spaces - cleaned_id = raw_id.strip() - - # Extract paper ID from ArXiv URL if present - match = re.search(r"arxiv\.org\/abs\/([\w\.]+)", cleaned_id) - if match: - cleaned_id = match.group(1) - else: - # Remove trailing dot if present - cleaned_id = re.sub(r"\.$", "", cleaned_id) - - return cleaned_id - - -def load_context(paper_id): - global LEADING_PROMPT - - # Clean the paper_id to remove spaces or extract ID from URL - paper_id = clean_paper_id(paper_id) - - # Check if the paper is already on Hugging Face - latex_source = get_paper_from_huggingface(paper_id) - paper_downloaded = False - - # If not 
found on Hugging Face, use arxiv_latex_extractor - if not latex_source: - try: - latex_source = get_paper_content(paper_id) - paper_downloaded = True - except Exception as e: - return None, [(f"Error loading paper with id {paper_id}: {e}",)] - - if paper_downloaded: - # Save the LaTeX content to a temporary file - with tempfile.NamedTemporaryFile( - mode="w+", suffix=".tex", delete=False - ) as tmp_file: - tmp_file.write(latex_source) - temp_file_path = tmp_file.name - - # Upload the paper to Hugging Face - try: - if os.path.getsize(temp_file_path) > 1: - hf_api = HfApi(token=os.environ["HUGGINGFACE_TOKEN"]) - - hf_api.upload_file( - path_or_fileobj=temp_file_path, - path_in_repo=f"papers/{paper_id}.tex", - repo_id="taesiri/arxiv_db", - repo_type="dataset", - ) - except Exception as e: - print(f"Error uploading paper with id {paper_id}: {e}") - - # Initialize the Anthropic client and QA model - client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"]) - qa_model = ContextualQA(client, model="claude-2.0") - context = f"{LEADING_PROMPT}\n{latex_source}" - qa_model.load_text(context) - - # Get the paper's title and abstract - title, abstract = get_paper_info(paper_id) - title = replace_texttt(title) - abstract = replace_texttt(abstract) - - return ( - qa_model, - [ - ( - f"Load the paper with id {paper_id}.", - f"\n**Title**: {title}\n\n**Abstract**: {abstract}\n\nPaper loaded. You can now ask questions.", - ) - ], - ) - - -def answer_fn(qa_model, question, chat_history): - # if question is empty, tell user that they need to ask a question - if question == "": - chat_history.append(("No Question Asked", "Please ask a question.")) - return qa_model, chat_history, "" - - client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"]) - qa_model.client = client - - try: - answer = qa_model.ask_question(question) - except Exception as e: - chat_history.append(("Error Asking Question", str(e))) - return qa_model, chat_history, "" - - chat_history.append((question, answer)) - return qa_model, chat_history, "" - - -def clear_context(): - return [] - - -with gr.Blocks( - theme=gr.themes.Soft(), css=custom_css, title="ArXiv QA with Claude" -) as demo: - gr.HTML( - """ -

    - Explore ArXiv Papers in Depth with claude-2.0 - Ask Questions and Get Answers Instantly -

    - """ - ) - # gr.HTML( - # """ - #

    - # Explore the depths of ArXiv papers with our interactive app, powered by the advanced claude-2.0 model. Ask detailed questions and get immediate, context-rich answers from academic papers. - #

    - # """ - # ) - - gr.HTML( - """ -
    - - Duplicate Space - - - Duplicate the Space with your Anthropic API Key  |  - Follow me on Twitter for more updates: @taesiri - -
    - """ - ) - - with gr.Row().style(equal_height=False): - with gr.Column(scale=2, emem_id="column-flex"): - chatbot = gr.Chatbot( - elem_id="chatbot", - avatar_images=("./assets/user.png", "./assets/Claude.png"), - ) - - with gr.Column(scale=1): - paper_id_input = gr.Textbox(label="Enter Paper ID", value="2310.12103") - btn_load = gr.Button("Load Paper") - qa_model = gr.State() - - question_txt = gr.Textbox( - label="Question", lines=5, placeholder="Type your question here..." - ) - - btn_answer = gr.Button("Answer Question") - btn_clear = gr.Button("Clear Chat") - - gr.HTML( - """
    All the inputs are being sent to Anthropic's Claude endpoints. Please refer to this link for privacy policy.
    """ - ) - - gr.Markdown( - "## Acknowledgements\n" - "This project is made possible through the generous support of " - "[Anthropic](https://www.anthropic.com/), who provided free access to the `claude-2.0` API." - ) - - btn_load.click(load_context, inputs=[paper_id_input], outputs=[qa_model, chatbot]) - - btn_answer.click( - answer_fn, - inputs=[qa_model, question_txt, chatbot], - outputs=[qa_model, chatbot, question_txt], - ) - - question_txt.submit( - answer_fn, - inputs=[qa_model, question_txt, chatbot], - outputs=[qa_model, chatbot, question_txt], - ) - - btn_clear.click(clear_context, outputs=[chatbot]) - - -app.mount("/js", StaticFiles(directory="js"), name="js") -gr.mount_gradio_app(app, demo, path="/") diff --git a/spaces/tayyabali1/llama-65b-hf/app.py b/spaces/tayyabali1/llama-65b-hf/app.py deleted file mode 100644 index 124c27225d5dbbb32e1d10a3cd22753817396324..0000000000000000000000000000000000000000 --- a/spaces/tayyabali1/llama-65b-hf/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/decapoda-research/llama-65b-hf").launch() \ No newline at end of file diff --git a/spaces/team-indain-image-caption/Hindi-image-captioning/app.py b/spaces/team-indain-image-caption/Hindi-image-captioning/app.py deleted file mode 100644 index 9f8f0755cf834c1d2a2ea5ab3452c8e779908cc8..0000000000000000000000000000000000000000 --- a/spaces/team-indain-image-caption/Hindi-image-captioning/app.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -import re -import gradio as gr -from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel - - -device = 'cpu' -encoder_checkpoint = 'google/vit-base-patch16-224' -decoder_checkpoint = 'surajp/gpt2-hindi' -model_checkpoint = 'team-indain-image-caption/hindi-image-captioning' -feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint) -tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint) -model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device) - - - -def predict(image,max_length=64, num_beams=4): - image = image.convert('RGB') - image = feature_extractor(image, return_tensors="pt").pixel_values.to(device) - clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0] - caption_ids = model.generate(image, max_length = max_length)[0] - caption_text = clean_text(tokenizer.decode(caption_ids)) - return caption_text - - - -input = gr.inputs.Image(label="Image to search", type = 'pil', optional=False) -output = gr.outputs.Textbox(type="auto",label="Captions") - - -article = "This HuggingFace Space presents a demo for Image captioning in Hindi built with VIT Encoder and GPT2 Decoder" -title = "Hindi Image Captioning System" -examples = [f"./example_{i}.jpg" for i in range(1,5)] - -interface = gr.Interface( - fn=predict, - inputs = input, - theme="grass", - outputs=output, - examples = examples, - title=title, - description=article, - ) -interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Algoritmamatematikadiskritpdffree.md b/spaces/terfces0erbo/CollegeProjectV2/Algoritmamatematikadiskritpdffree.md deleted file mode 100644 index 206b9f36cb1abd38bd6eca4a59149ceac60745d1..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Algoritmamatematikadiskritpdffree.md +++ /dev/null @@ -1,6 +0,0 @@ -

    algoritmamatematikadiskritpdffree


    Download File ✫✫✫ https://bytlly.com/2uGm0w



    - - 3cee63e6c2
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Antonie Iorgovan Tratat De Drept Administrativ Pdf Free.md b/spaces/terfces0erbo/CollegeProjectV2/Antonie Iorgovan Tratat De Drept Administrativ Pdf Free.md deleted file mode 100644 index b6af7a61878e2a4129e4e795889c4690f0ff812a..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Antonie Iorgovan Tratat De Drept Administrativ Pdf Free.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

    Antonie Iorgovan, (1994). tratat de drept administrativ Volume I-VIII. Common Law Studies, The Hague. The Concept of Law, second edition, Oxford: Calrendon Press.. As for working with the free institutions the management institute of. Antonie Iorgovan, (1991). tratat de drept administrativ. The Concept of law, second edition, Oxford: Calrendon Press.

    -

    antonie iorgovan tratat de drept administrativ pdf free


    DOWNLOAD ►►►►► https://bytlly.com/2uGj6v



    -

    In contrast to the concepts of Non-governmental and not-for-profit organisations, and to those of Non-profit, foundation, or public-private sectoral societies, the concept of.. [33] According to Antonie Iorgovan, the concept of trust belongs rather to the public sphere.

    -

    To Antonie Iorgovan from the first edition of «Drept administrativ» in Bucharest, in the years 1973-1975 Iorgovan Antonie worked as an assistant. The second edition appeared in 1977 under the supervision of the Minister of the Interior Silvian Aldea, after the death of the founder Iorgovan Antonie.1

    -

    Iorgovan A. (1990) Dosarul în Oprimare. De la Legea Bănățean la acțiunea din Dosarul Operației de Anchetă Preliminară (1-3),, in the conception of Antonie Iorgovan the science of administration is a specialized science, which has as object the knowledge of the administrative phenomenology, in its entire complexity, formulating principles and solutions for the permanent improvement of the organizing and functioning of the administrative structures, depending on the command values of the political power, on the economic-social needs, on the degree of technical equipping, on the general level of culture and civilization etc. Iorgovan A. (2002), Tratat de Drept Administrativ, Vol. I, 2nd edition. 

    -

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Free [BETTER]lance 800F Ver 9.2.iso 64 Bit.md b/spaces/terfces0erbo/CollegeProjectV2/Free [BETTER]lance 800F Ver 9.2.iso 64 Bit.md deleted file mode 100644 index 72510987b77ffbbbb295a2a6a80aa4a8aa552fbf..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Free [BETTER]lance 800F Ver 9.2.iso 64 Bit.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Freelance 800F Ver 9.2.iso 64 bit


    Download »»» https://bytlly.com/2uGkvQ



    - -Freelance 800F Ver 9.2.iso 64 Bit DOWNLOAD: https://fancli.com/1gn0yq lancer, lance stewart, lance pokemon, lance armstrong, lancelot, lancet, lance, lancer ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/thecho7/deepfake/training/transforms/__init__.py b/spaces/thecho7/deepfake/training/transforms/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Bajirao Mastani Movie Download 720p Movies A Historical Drama by Sanjay Leela Bhansali.md b/spaces/tialenAdioni/chat-gpt-api/logs/Bajirao Mastani Movie Download 720p Movies A Historical Drama by Sanjay Leela Bhansali.md deleted file mode 100644 index 02299e4fbed65b20c0f5f45e962c53262ea4eb65..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Bajirao Mastani Movie Download 720p Movies A Historical Drama by Sanjay Leela Bhansali.md +++ /dev/null @@ -1,68 +0,0 @@ - -

    How to Download Bajirao Mastani Movie in 720p HD Quality

    -

    Bajirao Mastani is a 2015 Indian historical romance film directed by Sanjay Leela Bhansali. It stars Ranveer Singh, Deepika Padukone and Priyanka Chopra in the lead roles. The film is based on the love story of the Maratha warrior Bajirao and his second wife Mastani, who was a princess of Bundelkhand.

    -

    bajirao mastani movie download 720p movies


    DOWNLOAD ····· https://urlcod.com/2uK5R3



    -

    The film was a critical and commercial success, winning several awards and becoming one of the highest-grossing Indian films of all time. It was praised for its cinematography, costumes, music, performances and direction. The film also received a positive response from international audiences and critics.

    -

    If you are a fan of this epic saga and want to watch it in high definition, you might be wondering how to download the Bajirao Mastani movie in 720p HD quality. Well, there are several ways to do that, but you need to be careful about the legal and ethical aspects of downloading movies online.

    -

    Legal and Ethical Issues of Downloading Movies Online

    -

    Downloading movies online without paying for them or obtaining the permission of the creators is illegal and unethical. It violates the copyright laws and deprives the filmmakers of their rightful income. It also exposes you to the risk of malware, viruses, phishing and other cyber threats that can harm your device or steal your personal information.

    -

    bajirao mastani full hd movie free download 720p
    -bajirao mastani 720p bluray movie download
    -bajirao mastani hindi movie download 720p quality
    -bajirao mastani movie 720p download filmywap
    -bajirao mastani movie download in 720p resolution
    -bajirao mastani movie free download 720p mkv
    -bajirao mastani movie download 720p tamilrockers
    -bajirao mastani movie download 720p worldfree4u
    -bajirao mastani movie download 720p khatrimaza
    -bajirao mastani movie download 720p bolly4u
    -bajirao mastani movie download 720p pagalworld
    -bajirao mastani movie download 720p moviescounter
    -bajirao mastani movie download 720p mp4
    -bajirao mastani movie download 720p torrent
    -bajirao mastani movie download 720p extramovies
    -bajirao mastani movie download 720p skymovieshd
    -bajirao mastani movie download 720p filmyzilla
    -bajirao mastani movie download 720p coolmoviez
    -bajirao mastani movie download 720p movierulz
    -bajirao mastani movie download 720p jalshamoviez
    -bajirao mastani movie download 720p hdpopcorns
    -bajirao mastani movie download 720p yts
    -bajirao mastani movie download 720p rdxhd
    -bajirao mastani movie download 720p okhatrimaza
    -bajirao mastani movie download 720p filmyhit
    -bajirao mastani movie download 720p moviesflix
    -bajirao mastani movie download 720p hdmovieshub
    -bajirao mastani movie download 720p ssrmovies
    -bajirao mastani movie download 720p dvdvilla
    -bajirao mastani movie download 720p hdfriday
    -bajirao mastani movie download 720p bollyshare
    -bajirao mastani movie download 720p madrasrockers
    -bajirao mastani movie download 720p isaimini
    -bajirao mastani movie download 720p tamilyogi
    -bajirao mastani movie download 720p jio rockers
    -bajirao mastani movie download 720p todaypk
    -bajirao mastani movie download 720p moviesda
    -bajirao mastani movie download 720p kuttyweb
    -bajirao mastani movie download 720p cinemavilla
    -bajirao mastani movie download 720p klwap
    -bajirao mastani movie download 720p dvdwap
    -bajirao mastani movie download 720p mallumv
    -bajirao mastani movie download 720p tamilgun
    -bajirao mastani movie download 720p tamilmv
    -bajirao mastani movie download 720p telugupalaka
    -bajirao mastani movie download 720p moviemad
    -bajirao mastani movie download 720p mp4moviez
    -bajirao mastani movie download 720p filmy4wap
    -bajirao mastani movie download 720p afilmywap

    -

    Therefore, we do not recommend or endorse any illegal or unethical methods of downloading movies online. We advise you to respect the hard work of the filmmakers and support them by watching their movies legally and ethically.

    -

    Legal and Ethical Ways to Download Bajirao Mastani Movie in 720p HD Quality

    -

    There are several legal and ethical ways to download Bajirao Mastani movie in 720p HD quality. Some of them are:

    -
      -
    • Buy or rent the movie from official platforms: You can buy or rent the movie from official platforms like Amazon Prime Video, Google Play Movies, YouTube Movies, iTunes, Netflix etc. These platforms offer high-quality streaming and downloading options for a reasonable price. You can also enjoy other benefits like subtitles, offline viewing, multiple devices etc.
    • -
    • Use a VPN service: If you live in a region where the movie is not available on any official platform, you can use a VPN service to access it from another region where it is available. A VPN service is a software that encrypts your internet traffic and changes your IP address to make it appear as if you are browsing from another location. This way, you can bypass geo-restrictions and access content that is otherwise blocked in your region. However, you need to be careful about choosing a reliable and secure VPN service that does not compromise your privacy or security.
    • -
    • Use a torrent client: Another way to download Bajirao Mastani movie in 720p HD quality is to use a torrent client. A torrent client is a software that allows you to download files from peer-to-peer networks. You need to find a torrent file or magnet link for the movie from a reputable torrent site and then open it with your torrent client. The torrent client will then download the movie from other users who have already downloaded it. However, this method also has some drawbacks. First, you need to be careful about the quality and authenticity of the torrent file or magnet link. Some of them may contain fake or corrupted files that can damage your device or contain malware. Second, you need to be aware of the legal implications of downloading movies from torrents. Some countries have strict laws against piracy and may impose fines or penalties for downloading copyrighted content without permission.
    • -
    -

    In conclusion, there are several ways to download Bajirao Mastani movie in 720p HD quality, but you need to be careful about the legal and ethical issues involved in each method. We hope this article has helped you find the best way for you to enjoy this masterpiece.

    e753bf7129
    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Chokher Bali 720p HD movie A complex web of relations and emotions.md b/spaces/tialenAdioni/chat-gpt-api/logs/Chokher Bali 720p HD movie A complex web of relations and emotions.md deleted file mode 100644 index e6347aa3834e61a9049c7064ebe62cd259a5dd70..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Chokher Bali 720p HD movie A complex web of relations and emotions.md +++ /dev/null @@ -1,56 +0,0 @@ -
    -

    Chokher Bali: A Dramatic Tale of Love, Betrayal and Widowhood

    -

    Chokher Bali is a 2003 Indian film based on the novel of the same name by Rabindranath Tagore. It is directed by Rituparno Ghosh and stars Aishwarya Rai, Prasenjit Chatterjee, Raima Sen and Tota Roy Chowdhury. The film explores the complex relationships between a young widow, Binodini, and her friend's husband, Mahendra, as well as his friend, Behari, and his mother, Rajlakshmi. The film is set in the early 20th century Bengal and depicts the social norms and customs of that era.

    -

    The film was critically acclaimed and won several awards, including the National Film Award for Best Feature Film in Bengali and the Golden Leopard at the Locarno International Film Festival. It was also India's official entry for the Academy Award for Best Foreign Language Film, but was not nominated.

    -

    chokher Bali 720p HD movie


    DOWNLOAD ⇒⇒⇒ https://urlcod.com/2uKb1F



    -

    If you are looking for a captivating drama that explores the themes of love, betrayal, widowhood and friendship, you can watch Chokher Bali online on ZEE5[^1^], a streaming platform that offers a variety of Indian movies and shows. You can also download Chokher Bali in HD quality from various websites such as todaymovie.org[^2^] or mkvcinemas.lat[^3^]. However, be aware that these websites may not be legal or safe to use.

    -

    Chokher Bali is a film that will make you think about the complexities of human emotions and the consequences of our actions. It is a film that will stay with you long after you finish watching it.

    - -

    Chokher Bali is not just a film, but a literary masterpiece that has been adapted for the screen. The novel by Rabindranath Tagore was first published in 1903 and is considered one of his finest works. It is part of a series of novels known as the Chaturanga Quartet, which deal with different aspects of love and society. The novel has been translated into many languages and has inspired several adaptations, including a 2019 Hindi play and a 2015 Bengali television series.

    -

    The film by Rituparno Ghosh is faithful to the original novel, but also adds some cinematic elements to enhance the visual appeal and the emotional impact. The film features beautiful cinematography, costumes and music that capture the essence of the period and the culture. The film also boasts of stellar performances by the cast, especially Aishwarya Rai, who plays the role of Binodini with grace and intensity. Rai received much praise for her portrayal of the complex and conflicted character, who is both a victim and a manipulator.

    -

    chokher bali full movie download free
    -watch chokher bali online hd quality
    -chokher bali aishwarya rai bachchan movie
    -chokher bali rituparno ghosh film
    -chokher bali bengali movie with english subtitles
    -chokher bali movie review and ratings
    -chokher bali movie based on rabindranath tagore novel
    -chokher bali movie cast and crew
    -chokher bali movie songs and music
    -chokher bali movie awards and nominations
    -chokher bali movie trailer and teaser
    -chokher bali movie streaming platforms and availability
    -chokher bali movie plot and summary
    -chokher bali movie analysis and interpretation
    -chokher bali movie behind the scenes and trivia
    -chokher bali movie comparison with book
    -chokher bali movie remake and adaptation
    -chokher bali movie controversy and criticism
    -chokher bali movie box office collection and budget
    -chokher bali movie scenes and dialogues
    -chokher bali prosenjit chatterjee performance
    -chokher bali raima sen character and role
    -chokher bali tota roy choudhury biography and filmography
    -chokher bali web series on zee5
    -chokher bali web series cast and episodes
    -chokher bali web series review and ratings
    -chokher bali web series watch online free
    -chokher bali web series download hd quality
    -chokher bali web series director suman mukhopadhyay
    -chokher bali web series difference from movie
    -chokher bali web series vijay varma role and performance
    -chokher bali web series parno mitra character and biography
    -chokher bali web series theme and message
    -chokher bali web series music and songs
    -chokher bali web series trailer and teaser
    -how to watch chokher bali movie online legally
    -best sites to download chokher bali movie hd quality
    -where to find chokher bali movie english subtitles
    -how to stream chokher bali movie on smart tv or mobile device
    -how to buy or rent chokher bali movie online

    -

    Chokher Bali is a film that will appeal to anyone who enjoys a good story with rich characters and emotions. It is a film that will make you appreciate the art of filmmaking and the power of literature. It is a film that you should not miss.

    - -

    If you are interested in watching Chokher Bali, you can find it on ZEE5, where you can stream it online or download it for offline viewing. You can also check out other films and shows by Rituparno Ghosh, who was one of the most acclaimed and influential filmmakers of Indian cinema. Some of his other notable works include Raincoat, Antarmahal, The Last Lear and Noukadubi.

    -

    Chokher Bali is a film that will enrich your mind and touch your heart. It is a film that will make you reflect on the meaning of love, loyalty and freedom. It is a film that you should watch today.

    e753bf7129
    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/DRIVERS NOTEBOOK - PHILCO - 14M2-P1243W8-3D Download.md b/spaces/tialenAdioni/chat-gpt-api/logs/DRIVERS NOTEBOOK - PHILCO - 14M2-P1243W8-3D Download.md deleted file mode 100644 index 6cf23b0840d52df10cc448f186cf4b3d1fff6ad5..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/DRIVERS NOTEBOOK - PHILCO - 14M2-P1243W8-3D Download.md +++ /dev/null @@ -1,44 +0,0 @@ -
    -

    How to Download and Install Drivers for Philco 14M2-P1243W8-3D Notebook

    -

    If you have a Philco 14M2-P1243W8-3D notebook, you may need to update your drivers to ensure optimal performance and compatibility with your operating system. Drivers are software components that enable your hardware devices to communicate with your computer. In this article, we will show you how to download and install the drivers for your Philco 14M2-P1243W8-3D notebook in a few simple steps.

    -

    Step 1: Identify your operating system

    -

    The first step is to identify the operating system that you are using on your Philco 14M2-P1243W8-3D notebook. This will help you find the correct drivers for your device. To check your operating system, you can follow these steps:

    -

    DRIVERS NOTEBOOK - PHILCO - 14M2-P1243W8-3D Download


    Download Zip ✺✺✺ https://urlcod.com/2uK8nH



    -
      -
    • Click on the Start button and type "system information" in the search box.
    • -
    • Select the System Information app from the list of results.
    • -
• Look for the System Type field under the System Summary section. It will tell you whether you have a 32-bit or a 64-bit operating system (an optional scripted check that reports the same information is sketched just after this list).
    • -
    -
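If you prefer a scriptable check, the same information can be read with a few lines of Python. This is only an optional sketch that assumes a Python 3 interpreter happens to be installed on the notebook; the System Information steps above remain the simplest route.

```python
# Hedged sketch: print the OS name, version and machine type using only the
# Python standard library (assumes Python 3 is installed on the notebook).
import platform

print(platform.system())    # e.g. "Windows"
print(platform.release())   # e.g. "10" or "8.1"
print(platform.machine())   # typically "AMD64" on 64-bit Windows, "x86" on 32-bit
# Caveat: a 32-bit Python running on a 64-bit Windows installation can report
# "x86" here, so treat the System Information app as the authoritative source.
```

Either way, note down whether your system is 32-bit or 64-bit before moving on to Step 2, since the driver download page lists drivers by compatible operating system.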

    Step 2: Visit the driver download page

    -

    The next step is to visit the driver download page for your Philco 14M2-P1243W8-3D notebook. You can access this page by clicking on this link: https://www.driverfiles.net/Laptops-Desktops/Philco/14M2/page,level3,3366,19,250814.html. This page will list all the drivers available for your device, along with their compatible operating systems and download links.

    -

    Step 3: Download the drivers

    -

    The third step is to download the drivers that you need for your Philco 14M2-P1243W8-3D notebook. You can do this by following these steps:

    -
      -
    • Scroll down to the Results section and find the driver that matches your device and operating system.
    • -
    • Click on the driver name to open a new tab with more details about the driver.
    • -
    • Click on the Download button to start downloading the driver file to your computer.
    • -
    • Repeat this process for any other drivers that you need for your device.
    • -
    -

    Step 4: Install the drivers

    -

    The final step is to install the drivers that you have downloaded for your Philco 14M2-P1243W8-3D notebook. You can do this by following these steps:

    -
      -
    • Locate the driver files that you have downloaded on your computer. They should be in your Downloads folder or in a location that you have specified during the download process.
    • -
    • Double-click on each driver file to launch the installation wizard.
    • -
    • Follow the on-screen instructions to complete the installation process.
    • -
    • Restart your computer after installing all the drivers.
    • -
    -

    Congratulations! You have successfully downloaded and installed the drivers for your Philco 14M2-P1243W8-3D notebook. You can now enjoy improved performance and functionality of your device.

    - -

    Troubleshooting tips

    -

    If you encounter any problems with your Philco 14M2-P1243W8-3D notebook after installing the drivers, you can try some of these troubleshooting tips:

    -
      -
    • Make sure that you have installed the correct drivers for your device and operating system. You can check the driver details and compatibility on the driver download page.
    • -
    • Make sure that you have restarted your computer after installing the drivers. This will ensure that the changes take effect.
    • -
    • Make sure that your device is connected properly to your computer. You can check the device status and troubleshoot any issues in the Device Manager.
    • -
    • Make sure that you have updated your operating system to the latest version. This will ensure that your device is compatible with the latest features and security updates.
    • -
    • If none of these tips work, you can contact Philco customer support for further assistance. You can find their contact information on their official website: https://www.philco.com.br/.
    • -
    -

    Conclusion

    -

    In this article, we have shown you how to download and install the drivers for your Philco 14M2-P1243W8-3D notebook. We hope that this guide has been helpful and that you have enjoyed using your device. If you have any questions or feedback, please feel free to leave a comment below.

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Gunday Full Movie Full Hd 1080p In Hindi Watch the Action-Packed Bollywood Thriller Online.md b/spaces/tialenAdioni/chat-gpt-api/logs/Gunday Full Movie Full Hd 1080p In Hindi Watch the Action-Packed Bollywood Thriller Online.md deleted file mode 100644 index 4a9c6dff3c336ad1c253669ae4ff47a6b39aaf3e..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Gunday Full Movie Full Hd 1080p In Hindi Watch the Action-Packed Bollywood Thriller Online.md +++ /dev/null @@ -1,91 +0,0 @@ -
    -

    Gunday Full Movie Full Hd 1080p In Hindi: A Review

    -

    If you are looking for a masala flick with bromance, romance, song sequences, tragic background and what not, then you might want to check out Gunday Full Movie Full Hd 1080p In Hindi. This is a 2014 Bollywood action drama film directed by Ali Abbas Zafar and starring Ranveer Singh, Arjun Kapoor, Priyanka Chopra Jonas and Irrfan Khan. The film is set in Calcutta during its most unsettled times in the 1970s and deals with the inseparable life of Bikram and Bala, two refugees who became gun couriers, coal bandits and eventually Calcutta's most powerful gangsters. But their lives change when Nandita, a beautiful cabaret dancer, enters their world and a counter-force takes charge. In this article, we will review the film and tell you why you should watch it.

    -

    Gunday Full Movie Full Hd 1080p In Hindi


    DOWNLOADhttps://urlcod.com/2uK5JM



    -

    What is Gunday?

    -

    The plot of Gunday

    -

    The film begins with a flashback to the Bangladesh Liberation War of 1971, where two young boys, Bikram and Bala, witness the massacre of their families by the Pakistani army. They escape to Calcutta as refugees and start working as gun couriers for a smuggler named Lateef. They soon become his trusted aides and start stealing coal from the trains. They also befriend a police officer named Satya, who helps them evade arrest. As they grow up, they become notorious coal bandits and establish their own empire. They also become popular among the people as Robin Hood figures who help the poor and needy.

    -

    However, their lives take a turn when they meet Nandita, a cabaret dancer who works for Bangali Babu, a rival coal mafia leader. Both Bikram and Bala fall in love with her and compete for her attention. But Nandita has a secret that will shatter their friendship and loyalty. She is actually an undercover cop who has been assigned to bring down Bikram and Bala's empire. She also reveals that Satya is actually her boss and has been using them for his own agenda. As Bikram and Bala discover the truth, they feel betrayed and decide to fight back. But will their bond survive this test? Will they be able to escape the law? Will they find out who Nandita really loves? These are some of the questions that the film answers in its thrilling climax.

    -

    The cast and crew of Gunday

    -

    The film boasts of a stellar cast that delivers impressive performances. Ranveer Singh and Arjun Kapoor play the roles of Bikram and Bala respectively, and share a great chemistry as friends and rivals. They also showcase their action skills and charisma in the film. Priyanka Chopra Jonas plays the role of Nandita, the femme fatale who seduces both Bikram and Bala. She also displays her dancing talent and glamour in the film. Irrfan Khan plays the role of Satya, the cunning cop who manipulates Bikram and Bala for his own benefit. He also adds a touch of class and gravitas to the film.

    -

    The film is directed by Ali Abbas Zafar, who also wrote the story and screenplay. He has previously directed films like Mere Brother Ki Dulhan (2011) and Sultan (2016). He has also written dialogues for films like Tashan (2008) and Jhoom Barabar Jhoom (2007). He shows his flair for creating an entertaining film that combines action, drama, romance and comedy. The film is produced by Aditya Chopra under his banner Yash Raj Films. The film also features other actors like Saurabh Shukla, Pankaj Tripathi, Victor Banerjee, Anant Vidhaat Sharma and Darshan Gurjar in supporting roles.

    -

    Why watch Gunday?

    -

    The action and drama of Gunday

    -

    One of the main reasons to watch Gunday is its action-packed plot that keeps you hooked till the end. The film has several scenes that showcase the daring exploits of Bikram and Bala as they steal coal from trains, fight with rival gangs, escape from police chases and confront their enemies. The film also has some emotional moments that depict the bond between Bikram and Bala, their love for Nandita, their betrayal by Satya and their ultimate showdown. The film also has some twists and turns that keep you guessing about what will happen next.

    -

    The romance and music of Gunday

    -

    Another reason to watch Gunday is its romantic angle that adds spice to the story. The film has some sizzling scenes between Bikram and Nandita, Bala and Nandita, and even Bikram and Bala as they express their feelings for each other. The film also has some melodious songs that complement the mood of the film. The songs are composed by Sohail Sen with lyrics by Irshad Kamil. Some of the popular songs from the film are "Jashn-e-Ishqa", "Tune Maari Entriyaan", "Jiya", "Asalaam-e-Ishqum" and "Saaiyaan". The songs are sung by singers like Javed Ali, Shadab Faridi, KK, Neeti Mohan, Vishal Dadlani, Bappi Lahiri, Neha Bhasin and Shahid Mallya.

    -

    The historical and cultural context of Gunday

    -

    A third reason to watch Gunday is its historical and cultural backdrop that adds depth to the story. The film is set in Calcutta during its most turbulent times in the 1970s when it was witnessing political unrest, social upheaval, economic crisis and refugee influx. The film portrays how Bikram and Bala represent the spirit of survival and resilience of the people who faced these challenges. The film also depicts how Calcutta was a melting pot of different cultures and communities that influenced its art, music, literature and cuisine. The film also pays tribute to some of the iconic personalities and landmarks of Calcutta such as Rabindranath Tagore, Satyajit Ray, Mother Teresa, Howrah Bridge, Victoria Memorial and Eden Gardens.

    -

    Gunday Hindi Movie Download 1080p Hd Quality
    -Watch Gunday Full Film Online Free Hd 1080p
    -Gunday 2014 Bollywood Movie Hd 1080p Torrent
    -How To Stream Gunday Full Movie In Hindi Hd
    -Gunday Full Hd Movie With English Subtitles 1080p
    -Gunday Hindi Film Review And Ratings Hd 1080p
    -Gunday Full Movie Hd 1080p Watch On Youtube
    -Gunday Bollywood Movie Cast And Crew Hd 1080p
    -Gunday Full Movie Download Link Hd 1080p Hindi
    -Gunday Hindi Movie Songs And Videos Hd 1080p
    -Gunday Full Movie Online Streaming Hd 1080p Free
    -Gunday Bollywood Film Box Office Collection Hd 1080p
    -Gunday Full Movie Hd 1080p Blu Ray Download
    -Gunday Hindi Movie Awards And Nominations Hd 1080p
    -Gunday Full Film Hd 1080p With Hindi Audio
    -Gunday Bollywood Movie Trivia And Facts Hd 1080p
    -Gunday Full Movie Hd 1080p On Netflix India
    -Gunday Hindi Movie Behind The Scenes Hd 1080p
    -Gunday Full Hd Movie Online Watch Dailymotion
    -Gunday Bollywood Film Dialogues And Quotes Hd 1080p
    -Gunday Full Movie Download Filmywap Hd 1080p Hindi
    -Gunday Hindi Movie Best Scenes And Clips Hd 1080p
    -Gunday Full Film Online Free Download Hd 1080p
    -Gunday Bollywood Movie Poster And Wallpaper Hd 1080p
    -Gunday Full Movie Watch Online Hotstar Hd 1080p
    -Gunday Hindi Film Ranveer Singh Performance Hd 1080p
    -Gunday Full Hd Movie Download Pagalworld Hindi
    -Gunday Bollywood Film Arjun Kapoor Role Hd 1080p
    -Gunday Full Movie Online Free No Sign Up Hd 1080p
    -Gunday Hindi Movie Priyanka Chopra Character Hd 1080p
    -Gunday Full Film Download Mp4 Hd 1080p Hindi
    -Gunday Bollywood Movie Irrfan Khan Scene Hd 1080p
    -Gunday Full Movie Online With Subtitles Hd 1080p Free
    -Gunday Hindi Film Shooting Locations Hd 1080p
    -Gunday Full Hd Movie Watch Online Zee5 Hindi
    -Gunday Bollywood Film Songs Lyrics And Meaning Hd 1080p
    -Gunday Full Movie Download Utorrent Hd 1080p Hindi
    -Gunday Hindi Movie Action And Romance Hd 1080p
    -Gunday Full Film Online Watch Amazon Prime Video Hd 1080p
    -Gunday Bollywood Movie Director And Producer Hd 1080p
    -Gunday Full Movie Download Khatrimaza Hd 1080p Hindi
    -Gunday Hindi Film Plot And Storyline Hd 1080p
    -Gunday Full Hd Movie Online Free Putlocker Hindi
    -Gunday Bollywood Film Theme And Message Hd 1080p
    -Gunday Full Movie Download Worldfree4u Hd 1080p Hindi
    -Gunday Hindi Film Comedy And Drama Hd 1080p
    -Gunday Full Film Online Watch Sonyliv Hd 1080p
    -Gunday Bollywood Film Budget And Profit Hd 1080p
    -Gunday Full Movie Download Bolly4u Hd 1080p Hindi
    -Gunday Hindi Film Criticism And Controversy Hd 1080p

    -

    How to watch Gunday?

    -

    The streaming platforms for Gunday

    -

If you want to watch Gunday online, you have several options to choose from. The film is currently available on Amazon Prime Video, which is a subscription-based service that offers a wide range of movies, shows and originals. You can also watch it on Google Play Movies, YouTube and Apple TV as rental or purchase options. These are pay-per-view services that allow you to stream or download the film for a fixed price.

    -

    The download options for Gunday

    -

    If you want to download Gunday offline, you have some options as well. You can use any of the above-mentioned services to download the film on your device after paying the required amount. You can also use some other websites or apps that offer free or pirated downloads of movies. However, we do not recommend or endorse these sources as they may be illegal, unsafe or unethical.

    -

    Conclusion

    -

    Gunday Full Movie Full Hd 1080p In Hindi is a fun-filled and action-packed movie that will entertain you with its story, performances, songs and visuals. It is a tribute to the friendship, love and courage of two men who rose from rags to riches in Calcutta's most turbulent times. It is also a celebration of Calcutta's rich history, culture and diversity. If you are looking for a masala flick with bromance, romance, song sequences, tragic background and what not, then you should definitely watch Gunday.

    -

    FAQs

    -
      -
    • Q: When was Gunday released?
• -
• A: Gunday was released in 2014 in India and other countries.
• -
    • Q: How much did Gunday earn at the box office?
    • -
    • A: Gunday earned ₹1.2 billion ($17 million) worldwide and became one of the highest-grossing films of 2014.
    • -
    • Q: Who sang the song "Tune Maari Entriyaan" in Gunday?
    • -
    • A: The song "Tune Maari Entriyaan" was sung by Bappi Lahiri, KK, Neeti Mohan and Vishal Dadlani.
    • -
    • Q: What is the meaning of Gunday?
    • -
    • A: Gunday is a Hindi word that means outlaws, rogues or goons.
    • -
    • Q: Is Gunday based on a true story?
    • -
    • A: No, Gunday is not based on a true story. It is a fictional story inspired by some real events and characters.
    • -
    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool APK - The Most Realistic and Fun Pool Game of 2017.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool APK - The Most Realistic and Fun Pool Game of 2017.md deleted file mode 100644 index 6b0dff66d6cc12dec1a12bb6a819aa2c0952770e..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool APK - The Most Realistic and Fun Pool Game of 2017.md +++ /dev/null @@ -1,150 +0,0 @@ -
    -
    - - -
    -

    How to Download 8 Ball Pool 2017 APK for Android

    -

    Introduction

    -

    Do you love playing pool games on your smartphone or tablet? If yes, then you must have heard of 8 Ball Pool, one of the most popular pool games in the world. 8 Ball Pool is a fun and addictive game that lets you play pool with your friends or other players online. You can customize your cue, table, and balls, and compete in tournaments and leagues for coins and trophies.

    -

    But did you know that there is a way to enjoy an even better version of this game? Yes, we are talking about 8 Ball Pool 2017 APK, a modified version of the original game that offers more features and benefits. In this article, we will show you how to download and install 8 Ball Pool 2017 APK for Android devices.

    -

    8 ball pool 2017 apk download


    Download >>> https://bltlly.com/2uOrTf



    -

    So what are the features of 8 Ball Pool 2017 APK that make it superior to the original game? Here are some of them:

    -
      -
    • You can get unlimited coins and cash to buy anything in the game
    • -
    • You can unlock all the cues, tables, and balls without spending any money
    • -
    • You can access all the game modes and levels without any restrictions
    • -
    • You can play with any player online without any limitations
    • -
    • You can enjoy faster and smoother gameplay without any bugs or glitches
    • -
    -

    As you can see, 8 Ball Pool 2017 APK is a great way to enhance your gaming experience and have more fun. But how can you download and install it on your Android device? Don't worry, we have got you covered. Just follow the steps below and you will be playing 8 Ball Pool 2017 APK in no time.

    -

    How to Download 8 Ball Pool 2017 APK for Android

    -

    Step 1: Enable Unknown Sources on Your Device

    -

    The first thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, follow these steps:

    -
      -
    1. Go to your device's Settings and tap on Security
    2. -
    3. Find the option that says Unknown Sources and toggle it on
    4. -
    5. A warning message will pop up. Tap on OK to confirm
    6. -
    -

    This will enable unknown sources on your device. You can disable it later after installing 8 Ball Pool 2017 APK.

    -

    Why is this necessary? Well, because 8 Ball Pool 2017 APK is not available on the Google Play Store, you need to download it from a third-party source. And to do that, you need to enable unknown sources on your device.

    -

    Step 2: Download 8 Ball Pool 2017 APK File from a Trusted Source

    -

    The next step is to download the 8 Ball Pool 2017 APK file from a trusted source. There are many websites that offer this file, but not all of them are safe and reliable. Some of them may contain viruses or malware that can harm your device or steal your data.

    -

    To avoid this, we recommend you to download the 8 Ball Pool 2017 APK file from [this link]. This is a verified and secure source that provides the latest and updated version of the file.

    -

    To download the file, follow these steps:

    -

    8 ball pool game free download for android apk
    -8 ball pool mod apk unlimited coins and cash 2017
    -8 ball pool hack apk download latest version 2017
    -8 ball pool old version apk download 2017
    -8 ball pool offline apk download for android 2017
    -8 ball pool apk download for pc windows 7 2017
    -8 ball pool online multiplayer apk download 2017
    -8 ball pool apk download uptodown 2017
    -8 ball pool apk download apkpure 2017
    -8 ball pool apk download revdl 2017
    -8 ball pool long line mod apk download 2017
    -8 ball pool legendary cues mod apk download 2017
    -8 ball pool auto win mod apk download 2017
    -8 ball pool guideline hack apk download 2017
    -8 ball pool instant reward apk download 2017
    -8 ball pool rewards app apk download 2017
    -8 ball pool tool pro apk download free 2017
    -8 ball pool cheat engine apk download android 2017
    -8 ball pool generator no human verification apk download 2017
    -8 ball pool unlimited guideline apk download no root 2017
    -how to download and install 8 ball pool mod apk in android phone in hindi/urdu (2017)
    -how to download and play hacked version of 8 ball pool on iphone/ipad (no jailbreak) (2017)
    -how to download and update latest version of 8 ball pool on facebook gameroom (2017)
    -how to download and use xmodgames for 8 ball pool on android device (rooted) (2017)
    -how to download and apply custom patch for lucky patcher in 8 ball pool (no root) (2017)

    -
      -
    1. Click on [this link] to go to the download page
    2. -
    3. Tap on the Download APK button and wait for the file to be downloaded
    4. -
    5. The file size is about 50 MB, so make sure you have enough space on your device and a stable internet connection
    6. -
    -

    Once the file is downloaded, you need to verify its authenticity. To do this, follow these steps:

    -
      -
    1. Go to your device's File Manager and locate the downloaded file
    2. -
    3. Tap on the file and check its details. It should have the following information:
    4. -
        -
      • Name: 8 Ball Pool 2017.apk
      • -
      • Size: 50 MB
      • -
      • Version: 3.12.4
      • -
      • Publisher: Minniclip.com
      • -
      • Date: June 19, 2023
      • -
      • Signature: C9F5D9B58C0B4E9A6F9E5C52F24A9F4C
      • -
      -
    5. If the file matches these details, then it is authentic and safe to install. If not, then delete it and download it again from [this link]
    6. -
    -

    Step 3: Install 8 Ball Pool 2017 APK on Your Device

    -

    The final step is to install 8 Ball Pool 2017 APK on your device. To do this, follow these steps:

    -
      -
    1. Tap on the downloaded file and a pop-up window will appear. Tap on Install
    2. -
    3. The installation process will begin and it may take a few seconds or minutes depending on your device's performance
    4. -
    5. Once the installation is complete, tap on Open to launch the app or tap on Done to exit the window
    6. -
    7. The app will ask for some permissions and access to your device's features. Tap on Allow to grant them. These are necessary for the app to function properly
    8. -
    -

    Congratulations! You have successfully installed 8 Ball Pool 2017 APK on your device. You can now enjoy playing the game with all the features and benefits that it offers.

    -

    How to Play 8 Ball Pool 2017 on Your Device

    -

    Now that you have installed 8 Ball Pool 2017 APK on your device, you might be wondering how to play it. Don't worry, we have got you covered. Here are the steps to play 8 Ball Pool 2017 on your device:

    -

    Step 1: Launch the App and Sign in with Your Account

    -

    The first step is to launch the app and sign in with your account. To do this, follow these steps:

    -
      -
    1. Tap on the app icon on your device's home screen or app drawer
    2. -
    3. The app will load and show you the main menu. Tap on Play
    4. -
    5. The app will ask you to sign in with your account. You can choose to sign in with your Facebook account, Google account, or Miniclip account. If you don't have any of these accounts, you can also create a new Miniclip account or play as a guest
    6. -
    7. Once you sign in with your account, you will be able to access your profile, achievements, leaderboards, friends, and settings
    8. -
    -

    Signing in with your account has many benefits. You can save your progress, sync your data across devices, earn rewards, chat with other players, and more.

    -

    Step 2: Choose Your Game Mode and Table Type

    -

    The next step is to choose your game mode and table type. To do this, follow these steps:

    -
      -
    1. On the main menu, tap on Play again
    2. -
    3. You will see four game modes to choose from: 1-on-1, Tournament, 9 Ball, and No Guidelines
    4. -
    5. 1-on-1 is the classic mode where you play against another player online. You can choose the table type and the bet amount. The higher the bet, the higher the reward
    6. -
    7. Tournament is the mode where you play against seven other players online in a knockout format. You can choose the table type and the entry fee. The higher the entry fee, the higher the prize pool
    8. -
    9. 9 Ball is the mode where you play against another player online using the 9 ball rules. You can choose the table type and the bet amount. The higher the bet, the higher the reward
    10. -
    11. No Guidelines is the mode where you play against another player online without any guidelines or aim assist. You can choose the table type and the bet amount. The higher the bet, the higher the reward
    12. -
    13. Tap on the game mode that you want to play and then tap on Play Now
    14. -
    15. You will see a list of table types to choose from: London, Sydney, Moscow, Tokyo, Las Vegas, Jakarta, Toronto, Cairo, Dubai, Mumbai, Berlin, and Venice
    16. -
    17. Each table type has a different theme, design, and difficulty level. The higher the difficulty level, the higher the bet amount and reward. You can also see how many players are online on each table type
    18. -
    19. Tap on the table type that you want to play and then tap on Play Now
    20. The app will search for an available opponent online and match you with them. You will see their name, level, country, and profile picture
    21. -
    22. Tap on Ready to start the game or tap on Change Opponent to find another opponent
    23. -
    -

    That's how you choose your game mode and table type. You can also see your stats, achievements, and leaderboards on the main menu. You can also customize your cue, table, and balls by tapping on the Shop icon.

    -

    Step 3: Challenge Your Friends or Other Players Online

    -

    The last step is to challenge your friends or other players online. To do this, follow these steps:

    -
      -
    1. On the main menu, tap on the Friends icon
    2. -
    3. You will see a list of your friends who are online or offline. You can also see their name, level, country, and profile picture
    4. -
    5. Tap on the friend that you want to challenge and then tap on Challenge
    6. -
    7. You will see a list of table types to choose from. Tap on the table type that you want to play and then tap on Play Now
    8. -
    9. The app will send a challenge request to your friend. If they accept it, the game will start. If they decline it or don't respond, you can try again or challenge another friend
    10. -
    11. You can also challenge other players online by tapping on the Play with Friends icon on the main menu. You can enter a unique code or scan a QR code to join a game with another player online
    12. -
    -

    That's how you challenge your friends or other players online. You can also chat with them during the game by tapping on the Chat icon. You can send text messages or emojis to communicate with them.

    -

    Conclusion

    -

    In this article, we have shown you how to download and install 8 Ball Pool 2017 APK for Android devices. We have also shown you how to play 8 Ball Pool 2017 on your device. We hope that you have found this article helpful and informative.

    -

    8 Ball Pool 2017 APK is a great way to enjoy playing pool games on your smartphone or tablet. It offers more features and benefits than the original game, such as unlimited coins and cash, unlocked cues, tables, and balls, all game modes and levels, faster and smoother gameplay, and more.

    -

    If you love playing pool games, then you should definitely try 8 Ball Pool 2017 APK. It is easy to download and install, and it is compatible with most Android devices. It is also safe and secure, as long as you download it from a trusted source.

    -

    So what are you waiting for? Download 8 Ball Pool 2017 APK today and start playing pool with your friends or other players online. You will have a lot of fun and excitement with this game.

    -

    To download 8 Ball Pool 2017 APK for Android devices, click [here]. To learn more about 8 Ball Pool 2017 APK, visit [this website]. To play 8 Ball Pool online, go to [this link].

    -

    Thank you for reading this article. We hope that you have enjoyed it. If you have any questions or feedback, please leave them in the comments section below. We would love to hear from you.

    -

    Frequently Asked Questions (FAQs)

    -

    Here are some of the most frequently asked questions (FAQs) about 8 Ball Pool 2017 APK:

    Q: Is 8 Ball Pool 2017 APK legal?

    -

    A: 8 Ball Pool 2017 APK is not legal, as it is a modified version of the original game that violates the terms and conditions of the game developer. However, it is not illegal either, as it does not involve any hacking or cheating. It is a gray area that depends on your personal preference and risk tolerance.

    -

    Q: Is 8 Ball Pool 2017 APK safe?

    -

    A: 8 Ball Pool 2017 APK is safe, as long as you download it from a trusted source. We have provided you with a verified and secure source that provides the latest and updated version of the file. However, you should always be careful when downloading and installing any app from unknown sources, as they may contain viruses or malware that can harm your device or steal your data.

    -

    Q: How to update 8 Ball Pool 2017 APK?

    -

    A: To update 8 Ball Pool 2017 APK, you need to download and install the latest version of the file from [this link]. You can also check for updates on [this website]. You should always update your app to enjoy the new features and bug fixes.

    -

    Q: How to uninstall 8 Ball Pool 2017 APK?

    -

    A: To uninstall 8 Ball Pool 2017 APK, you need to follow these steps:

    -
      -
    1. Go to your device's Settings and tap on Apps
    2. -
    3. Find and tap on 8 Ball Pool 2017
    4. -
    5. Tap on Uninstall and confirm
    6. -
    -

    This will uninstall 8 Ball Pool 2017 APK from your device. You can also delete the downloaded file from your device's File Manager.

    -

    Q: How to contact the developer of 8 Ball Pool 2017 APK?

    -

    A: To contact the developer of 8 Ball Pool 2017 APK, you can visit [this website] and fill out the contact form. You can also email them at [this address]. You can also follow them on [this social media platform] for updates and news.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Daemon Tools Lite 501 Activation Serial Number.md b/spaces/tioseFevbu/cartoon-converter/scripts/Daemon Tools Lite 501 Activation Serial Number.md deleted file mode 100644 index f67288070b9dd48fb43b52e348db38f27985f51f..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Daemon Tools Lite 501 Activation Serial Number.md +++ /dev/null @@ -1,176 +0,0 @@ - -

    DAEMON Tools Lite 501 Activation Serial Number: How to Get It and Use It

    -

    If you are looking for a powerful and easy-to-use tool to create and mount disc images on your computer, you might have heard of DAEMON Tools Lite. This software allows you to emulate up to four virtual drives, work with various image formats, and access advanced features such as image compression, password protection, and data splitting. But how can you get the most out of this software? And how can you activate it with a serial number?

    -

    In this article, we will show you how to get an activation serial number for DAEMON Tools Lite 501, how to activate it with a serial number, and how to use it after activation. We will also answer some frequently asked questions about this software and provide you with some useful resources for more information and support. By the end of this article, you will be able to enjoy all the benefits of DAEMON Tools Lite 501 without any hassle.

    -

    daemon tools lite 501 activation serial number


    Downloadhttps://urlcod.com/2uHwLs



    -

    How to Get an Activation Serial Number for DAEMON Tools Lite 501

    -

    Before you can activate DAEMON Tools Lite 501 with a serial number, you need to get one first. There are three types of licenses available for DAEMON Tools Lite: Personal License, Commercial License, and Free License. Each license has different features, prices, and terms of use. To find out which one is the best for you, visit the Activation options article on the official website.

    -

    If you want to buy a license online or offline, you can click the Buy button on the main window of the software or go to the Buy page on the official website. You can choose between a one-time payment or a subscription plan, and you can pay with various methods such as credit card, PayPal, or bank transfer. You will receive an email with your serial number and a download link after your payment is confirmed.

    -

    If you want to get a free trial or a giveaway, you can click the Free Download button on the main window of the software or go to the Download page on the official website. You can choose between a 14-day trial or a 20-day giveaway, depending on the availability and your eligibility. You will need to provide your email address and agree to the terms and conditions to get your serial number and download link.

    -

    How to Activate DAEMON Tools Lite 501 with a Serial Number

    -

    Once you have your serial number, you can activate DAEMON Tools Lite 501 with a few simple steps. Here is how to do it:

    -
      -
    1. Run the software and click the License button on the main window.
    2. -
    3. Select the Activate License option and click Next.
    4. -
    5. Enter your serial number in the field and click Next.
    6. -
    7. Follow the on-screen instructions and complete the activation process.
    8. -
    -

    If you encounter any problems during the activation, such as invalid serial number, license expired, or activation failed, you can visit the Troubleshooting article on the official website for some solutions. You can also contact the customer service or technical support of DAEMON Tools Lite 501 for further assistance.

    -

    To check your license status and activate advanced features, you can click the License button on the main window again. You will see your license type, expiration date, and available features. You can also upgrade your license or deactivate your license from this window.

    -

    How to Use DAEMON Tools Lite 501 after Activation

    -

    After you activate DAEMON Tools Lite 501 with a serial number, you can start using it to create and mount disc images on your computer. Here are some of the main functions and features of this software:

    -

    -

    How to Create and Mount Disc Images from Various Formats

    -

    A disc image is a file that contains all the data and information of a physical disc, such as CD, DVD, or Blu-ray. You can create disc images from your original discs or from files and folders on your computer. You can also mount disc images on virtual drives and access them as if they were real discs.

    -

    To create a disc image from a physical disc, follow these steps:

    -
      -
    1. Insert the disc into your optical drive and run DAEMON Tools Lite 501.
    2. -
    3. Select the Create Image option from the main window or from the tray icon menu.
    4. -
    5. Select your optical drive from the drop-down list and choose a destination folder for your image file.
    6. -
    7. Select an image format from the drop-down list. You can choose between ISO, MDS/MDF, MDX, APE/CUE, FLAC/CUE, NRG, or ISZ formats.
    8. -
    9. Click Start and wait for the image creation process to finish.
    10. -
    -

    To create a disc image from files and folders on your computer, follow these steps:

    -
      -
    1. Select the Create Image option from the main window or from the tray icon menu.
    2. -
    3. Select the Data Image option from the pop-up window.
    4. -
    5. Add files and folders to your image by dragging and dropping them or by clicking the Add Files or Add Folder buttons.
    6. -
    7. Select an image format from the drop-down list. You can choose between ISO, MDS/MDF, MDX, APE/CUE, FLAC/CUE, NRG, or ISZ formats.
    8. -
    9. Select a destination folder for your image file and enter a name for it.
    10. -
    11. Click Create and wait for the image creation process to finish.
    12. -
    -

    To mount a disc image on a virtual drive, follow these steps:

    -
      -
    1. Select an empty virtual drive from the main window or from the tray icon menu.
    2. -
    3. Select the Browse... option and locate your image file on your computer.
    4. -
5. Select your image file and click Open. The image file will be mounted on the virtual drive and you will see its icon and label on the main window and the tray icon menu.
    6. -
    7. You can access the disc image content by double-clicking on the virtual drive or by opening it from your file explorer.
    8. -
    -

    How to Manage Your Image Collection and Customize Settings

    -

    DAEMON Tools Lite 501 allows you to manage your image collection and customize settings according to your preferences. You can organize your images into categories, add tags and comments, edit image properties, and more. You can also change the appearance, behavior, and performance of the software, as well as enable or disable some features.

    -

    To manage your image collection, follow these steps:

    -
      -
    1. Select the Images option from the main window or from the tray icon menu.
    2. -
    3. You will see a list of all your images on the left panel. You can sort them by name, date, size, type, or path.
    4. -
    5. You can create categories by clicking the Add Category button on the bottom left corner. You can name your categories and drag and drop images into them.
    6. -
    7. You can add tags and comments to your images by right-clicking on them and selecting the Edit option. You can enter any text you want in the fields and click OK.
    8. -
    9. You can edit image properties by right-clicking on them and selecting the Properties option. You can change the image name, format, size, protection, and checksum. You can also view the image details such as creation date, source, and location.
    10. -
    -

    To customize settings, follow these steps:

    -
      -
    1. Select the Preferences option from the main window or from the tray icon menu.
    2. -
    3. You will see a window with various tabs for different settings. You can explore each tab and change the options as you wish.
    4. -
    5. Some of the settings you can customize are:
        -
      • The appearance of the software, such as theme, language, font, and icons.
      • -
      • The behavior of the software, such as startup, notifications, hotkeys, and updates.
      • -
      • The performance of the software, such as memory usage, cache size, and compression level.
      • -
      • The features of the software, such as virtual devices, image formats, disc burning, and file associations.
      • -
    6. -
    7. After you make any changes, click OK to save them or Cancel to discard them.
    8. -
    -

    How to Use Virtual Devices and Emulate Disc Protection

    -

    A virtual device is a software simulation of a physical device, such as a CD/DVD drive or a hard disk drive. You can use virtual devices to mount disc images and access their content without having a real device or a real disc. DAEMON Tools Lite 501 allows you to create up to four virtual devices and assign them different letters and names.

    -

    To create a virtual device, follow these steps:

    -
      -
    1. Select the Add Device option from the main window or from the tray icon menu.
    2. -
    3. Select the type of device you want to create: DVD-ROM, HDD, or Floppy Disk Drive.
    4. -
    5. Select a letter and a name for your device and click Add Device.
    6. -
    7. Your virtual device will be created and displayed on the main window and the tray icon menu.
    8. -
    -

    To delete a virtual device, follow these steps:

    -
      -
    1. Select the virtual device you want to delete from the main window or from the tray icon menu.
    2. -
    3. Select the Delete Device option from the pop-up menu.
    4. -
    5. Your virtual device will be deleted and removed from the main window and the tray icon menu.
    6. -
    -

    A disc protection is a mechanism that prevents unauthorized copying or playing of a disc. Some discs have special encryption or authentication methods that make them incompatible with some devices or software. DAEMON Tools Lite 501 allows you to emulate some of these disc protections and bypass their restrictions. Some of the disc protections you can emulate are: Safedisc, Laserlock, RMS, SecuROM, and LaserLock STARFORCE.

    -

    To emulate a disc protection, follow these steps:

    -
      -
    1. Select a virtual device that has an image mounted on it from the main window or from the tray icon menu.
    2. -
    3. Select the Emulation option from the pop-up menu.
    4. -
    5. Select the disc protection you want to emulate from the list. You can select more than one if needed.
    6. -
    7. Your virtual device will emulate the disc protection and you will see a check mark next to it on the list.
    8. -
    -

    Conclusion

    -

    DAEMON Tools Lite 501 is a powerful and easy-to-use tool to create and mount disc images on your computer. It allows you to emulate up to four virtual drives, work with various image formats, and access advanced features such as image compression, password protection, and data splitting. But to get the most out of this software, you need to activate it with a serial number.

    -

    In this article, we showed you how to get an activation serial number for DAEMON Tools Lite 501, how to activate it with a serial number, and how to use it after activation. We also answered some frequently asked questions about this software and provided you with some useful resources for more information and support. We hope you found this article helpful and informative.

    -

    If you want to compare DAEMON Tools Lite 501 with other similar software, you can check out this table below:

| Software | Features | Price |
| --- | --- | --- |
| DAEMON Tools Lite 501 | Create and mount disc images from various formats; emulate up to four virtual devices; emulate disc protection; compress, protect, and split images; manage image collection and customize settings | Personal License: $19.99 (one-time) or $3.99/month (subscription); Commercial License: $29.99 (one-time) or $5.99/month (subscription); Free License: limited features and ads |
| PowerISO | Create and mount disc images from various formats; emulate up to 23 virtual devices; burn, rip, copy, and edit discs; encrypt, compress, and split images; convert image formats | Full Version: $29.95 (one-time); Free Version: limited features and file size |
| WinCDEmu | Create and mount disc images from various formats; emulate unlimited number of virtual devices; support for optical discs and hard disk partitions; open-source and lightweight; no installation required | Free for personal and commercial use |
    -

    If you want to learn more about DAEMON Tools Lite 501 or get some help with using it, you can visit these resources:

    -
      -
    • Official Website: The official website of DAEMON Tools Lite 501, where you can buy, download, update, or upgrade the software.
    • -
    • User Manual: The user manual of DAEMON Tools Lite 501, where you can find detailed instructions on how to use the software and its features.
    • -
    • Forum: The forum of DAEMON Tools Lite 501, where you can interact with other users, ask questions, share tips, and report problems.
    • -
    • Blog: The blog of DAEMON Tools Lite 501, where you can find news, articles, tutorials, and reviews related to the software and its topics.
    • -
    • Support Center: The support center of DAEMON Tools Lite 501, where you can contact the customer service or technical support team for any issues or inquiries.
    • -
    -

    FAQs

    -

    Here are some of the frequently asked questions about DAEMON Tools Lite 501:

    -

    Q1: What are the system requirements for DAEMON Tools Lite 501?

    -

    A1: The system requirements for DAEMON Tools Lite 501 are:

    -
      -
    • Operating system: Windows XP/Vista/7/8/10 (32-bit or 64-bit)
    • -
    • Processor: 500 MHz or higher
    • -
    • Memory: 256 MB or higher
    • -
    • Hard disk space: 30 MB or higher
    • -
    • Internet connection: Required for activation and updates
    • -
    • Optical drive: Required for creating images from discs
    • -
    -

    Q2: How can I update or upgrade my DAEMON Tools Lite 501?

    -

    A2: You can update or upgrade your DAEMON Tools Lite 501 by following these steps:

    -
      -
    1. Select the About... option from the main window or from the tray icon menu.
    2. -
    3. Select the Check for updates option from the pop-up window.
    4. -
    5. If there is a new version available, you will see a notification and a download link. Click the link and follow the instructions to install the update.
    6. -
    7. If you have a subscription license, you will also see an option to upgrade to the latest version. Click the option and follow the instructions to upgrade your license and software.
    8. -
    -

    Q3: How can I uninstall or deactivate my DAEMON Tools Lite 501?

    -

    A3: You can uninstall or deactivate your DAEMON Tools Lite 501 by following these steps:

    -
      -
    1. Select the License option from the main window or from the tray icon menu.
    2. -
    3. Select the Deactivate License option and click Next.
    4. -
    5. Follow the on-screen instructions and complete the deactivation process.
    6. -
    7. Go to your Control Panel and select the Add or Remove Programs option.
    8. -
    9. Find DAEMON Tools Lite 501 on the list and click the Uninstall button.
    10. -
    11. Follow the on-screen instructions and complete the uninstallation process.
    12. -
    -

    Q4: How can I contact the customer service or technical support of DAEMON Tools Lite 501?

    -

    A4: You can contact the customer service or technical support of DAEMON Tools Lite 501 by following these steps:

    -
      -
    1. Go to the Support Center page on the official website.
    2. -
    3. Select the type of inquiry you have: Sales, Billing, Licensing, or Tech Support.
    4. -
    5. Fill out the form with your name, email, subject, message, and attachments if any.
    6. -
    7. Click Submit and wait for a response from the team.
    8. -
    -

    Q5: What are some tips and tricks for using DAEMON Tools Lite 501 effectively?

    -

    A5: Here are some tips and tricks for using DAEMON Tools Lite 501 effectively:

    -
      -
    • You can use keyboard shortcuts to perform common tasks faster. For example, you can press Ctrl+D to create an image, Ctrl+M to mount an image, Ctrl+E to eject an image, and Ctrl+L to open the license wizard.
    • -
    • You can use command-line parameters to automate some operations. For example, you can use -mount to mount an image, -unmount to unmount an image, -get_count to get the number of virtual devices, and -help to get a list of all parameters.
    • -
    • You can use plugins to extend the functionality of DAEMON Tools Lite 501. For example, you can use Astroburn Lite to burn discs, Catch! to share files, iSCSI Initiator to connect to remote devices, and Image Editor to edit images.
    • -
    • You can use DAEMON Tools Gadget to access some features from your desktop. For example, you can drag and drop images to mount them, click on virtual devices to open them, and click on buttons to create images, burn discs, or access preferences.
    • -
    • You can use DAEMON Tools Net to share your images and devices with other users over a network. For example, you can create a server and a client, add users and groups, assign permissions and quotas, and access remote images and devices.
    • -

    b2dd77e56b
    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Jesse C Stine Pdf 17.md b/spaces/tioseFevbu/cartoon-converter/scripts/Jesse C Stine Pdf 17.md deleted file mode 100644 index 4d536f101c4bf06edcc0b6f8082f118c7734834f..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Jesse C Stine Pdf 17.md +++ /dev/null @@ -1,28 +0,0 @@ - -

    Who is Jesse C. Stine and Why You Should Read His Book "Insider Buy Superstocks"

    -

    Jesse C. Stine is a self-taught stock trader who claims to have turned $46,000 into $6.8 million in 28 months by investing in "Superstocks" - the stock market's biggest winners. He shares his story and his methods in his book "Insider Buy Superstocks", which was published in 2013.

    -

    Stine has an MBA in economics from Georgia State University, but he learned most of his trading skills through trial and error. He started trading in the mid-1990s, but lost money on penny stocks, biotech stocks, and dotcom stocks. He also suffered from health issues that affected his mental and physical well-being.

    -

    jesse c stine pdf 17


    Download ===> https://urlcod.com/2uHxNQ



    -

    He eventually discovered the CANSLIM system developed by William O'Neil, the founder of Investor's Business Daily. He modified the system to suit his own style and criteria, and focused on finding stocks that had strong earnings growth, high relative strength, low institutional ownership, and insider buying. He also used technical analysis, market timing, risk management, and psychology to enhance his performance.
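    To make that screening idea concrete, here is a minimal, illustrative pandas filter. The column names and thresholds are assumptions chosen for demonstration; they are not taken from Stine's book or O'Neil's CANSLIM rules.

```python
# Illustrative only: column names and thresholds are assumptions, not Stine's actual criteria.
import pandas as pd

def screen_candidates(df: pd.DataFrame) -> pd.DataFrame:
    """Keep rows that loosely match the criteria described above."""
    mask = (
        (df["eps_growth_pct"] >= 100)                 # strong earnings growth
        & (df["relative_strength"] >= 90)             # high relative strength rank
        & (df["institutional_ownership_pct"] <= 20)   # low institutional ownership
        & (df["insider_net_buys"] > 0)                # recent insider buying
    )
    return df[mask]

# Toy usage example with made-up data:
stocks = pd.DataFrame({
    "ticker": ["AAA", "BBB"],
    "eps_growth_pct": [150, 20],
    "relative_strength": [95, 60],
    "institutional_ownership_pct": [10, 55],
    "insider_net_buys": [3, 0],
})
print(screen_candidates(stocks)["ticker"].tolist())  # -> ['AAA']
```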

    -

    Stine's book "Insider Buy Superstocks" contains 25 highlights that summarize his approach and philosophy. Some of them are:

    -
      -
    • How to spot elusive "Superstocks", the stock market's biggest winners.
    • Free online sources he uses to help him discover future "Superstocks".
    • Why you can't follow Peter Lynch; how it pays NOT to buy what you know best or what makes you "feel good".
    • Learn about the most potent "drug" ever developed that will absolutely destroy your investment returns.
    • The "Canary in a Coalmine" indicator he uses that precedes market crashes within 6 days.
    • How high-level investment returns require you to go against everything you've ever been taught.
    • Discover that the market is more manipulated than you could ever imagine.
    • Why some stocks must be sold at $25.
    • Why you can never invest in an ETF or mutual fund ever again.
    • Learn how the "Magic Line" triggers gigantic stock thrusts.
    • How to dramatically increase your trading focus, creativity, and energy.
    -

    If you are interested in learning more about Jesse C. Stine and his book "Insider Buy Superstocks", you can download a PDF version of his book from PDF Room[^1^] or read a detailed profile of his trading journey from Trading Reviewers[^2^].

    - -

    Stine claims that his book is not a typical investment book that teaches you how to make 10% or 20% a year. Instead, he aims to show you how to make 1,000% or 10,000% returns by finding and riding the rare "Superstocks" that can make you rich in a short period of time. He also warns that his strategy is not for everyone, as it requires a lot of discipline, patience, courage, and contrarian thinking.

    -

    Stine backs up his claims with real examples of his trades and performance. He shows how he made huge profits by investing in stocks like $CROX, $GMCR, $BIDU, $ISRG, $AAPL, $NFLX, $TSLA, $DDD, and many others. He also reveals his mistakes and losses, and how he learned from them. He shares his insights on market cycles, trends, sentiment, indicators, catalysts, and risk management.

    -

    Stine's book has received mostly positive reviews from readers who appreciate his honesty, simplicity, and generosity. Many readers have reported that his book has changed their perspective on trading and investing, and inspired them to pursue their own "Superstock" dreams. However, some readers have also criticized his book for being too repetitive, self-promotional, or outdated.

    -

    cec2833e83
    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Karaoke CD G Creator Pro 2.3.4 Serial BEST Crack 3.md b/spaces/tioseFevbu/cartoon-converter/scripts/Karaoke CD G Creator Pro 2.3.4 Serial BEST Crack 3.md deleted file mode 100644 index 1c81a105e1b70bc0545b93fb892095afb7316b8d..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Karaoke CD G Creator Pro 2.3.4 Serial BEST Crack 3.md +++ /dev/null @@ -1,22 +0,0 @@ -
    -

    How to Create Your Own Karaoke CD+G Songs with Karaoke CD G Creator Pro 2.3.4

    -

    If you love singing karaoke and want to create your own CD+G songs from scratch or import KAR (MIDI Karaoke) files, you may want to try Karaoke CD G Creator Pro 2.3.4. This is a powerful and easy-to-use CDG Karaoke authoring tool that allows you to use any MP3 or WAV file as a soundtrack, and even remove lead vocal from many CD recordings with Power Vocal Remover. You can also customize the lyrics, graphics, colors, fonts, and synchronization of your karaoke songs, and burn them to CD+G discs or save them as BIN or MP3+G files.

    -

    Karaoke CD G Creator Pro 2.3.4 serial crack 3


    Download File ––– https://urlcod.com/2uHxcb



    -

    However, you may encounter some problems when using Karaoke CD G Creator Pro 2.3.4, such as errors, crashes, or activation issues. That's why some people may look for a serial crack to unlock the full features of the program without paying for it. But this is not a safe or legal way to use the software, as it may expose your computer to viruses, malware, or legal consequences. Moreover, you may not get the latest updates, bug fixes, or technical support from the developers.

    -

    Therefore, we recommend that you download Karaoke CD G Creator Pro 2.3.4 from the official website[^1^] and purchase a license key to activate it legally and safely. You can also try the free trial version before buying to see if it meets your needs. By doing so, you will be able to enjoy creating your own karaoke songs with Karaoke CD G Creator Pro 2.3.4 without any hassle or risk.

    - -

    To use Karaoke CD G Creator Pro 2.3.4, you need to follow these steps:

    -
      -
    1. Download and install the program from the official website[^1^]. You can also download a free trial version to test it before buying.
    2. Launch the program and choose whether you want to create a new song from scratch, import a KAR file, or open an existing CD+G file.
    3. If you create a new song, you need to select a soundtrack file (MP3 or WAV) and optionally remove the vocals with Power Vocal Remover.
    4. Enter the lyrics of the song in the lyrics editor. You can also import them from a text file or look for them on the Internet with the built-in tool.
    5. Synchronize the lyrics with the music by using the spacebar or clicking on the Sync button. You can also adjust the timing manually or automatically.
    6. Customize the appearance of your karaoke song by choosing the font, color, alignment, and background of the lyrics. You can also add images, countdowns, title and credits screens, and other effects.
    7. Preview your karaoke song in the CD+G window and make any necessary changes.
    8. Save your karaoke song as a BIN or MP3+G file, or burn it to a CD+G disc with Power CD+G Burner (sold separately).
    -

    You can also use Karaoke CD G Creator Pro 2.3.4 to edit existing CD+G songs, convert MIDI Karaoke files to CD+G format, and create duets with split screen and concurrent singer highlighting. For more details and tutorials, you can visit the official website[^1^] or watch some videos on YouTube[^2^].

    -

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py deleted file mode 100644 index 951ca1794db87ef76d4fd8d9d3e607efd5f335b2..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/styles/__init__.py +++ /dev/null @@ -1,93 +0,0 @@ -""" - pygments.styles - ~~~~~~~~~~~~~~~ - - Contains built-in styles. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pip._vendor.pygments.plugin import find_plugin_styles -from pip._vendor.pygments.util import ClassNotFound - - -#: Maps style names to 'submodule::classname'. -STYLE_MAP = { - 'default': 'default::DefaultStyle', - 'emacs': 'emacs::EmacsStyle', - 'friendly': 'friendly::FriendlyStyle', - 'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle', - 'colorful': 'colorful::ColorfulStyle', - 'autumn': 'autumn::AutumnStyle', - 'murphy': 'murphy::MurphyStyle', - 'manni': 'manni::ManniStyle', - 'material': 'material::MaterialStyle', - 'monokai': 'monokai::MonokaiStyle', - 'perldoc': 'perldoc::PerldocStyle', - 'pastie': 'pastie::PastieStyle', - 'borland': 'borland::BorlandStyle', - 'trac': 'trac::TracStyle', - 'native': 'native::NativeStyle', - 'fruity': 'fruity::FruityStyle', - 'bw': 'bw::BlackWhiteStyle', - 'vim': 'vim::VimStyle', - 'vs': 'vs::VisualStudioStyle', - 'tango': 'tango::TangoStyle', - 'rrt': 'rrt::RrtStyle', - 'xcode': 'xcode::XcodeStyle', - 'igor': 'igor::IgorStyle', - 'paraiso-light': 'paraiso_light::ParaisoLightStyle', - 'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle', - 'lovelace': 'lovelace::LovelaceStyle', - 'algol': 'algol::AlgolStyle', - 'algol_nu': 'algol_nu::Algol_NuStyle', - 'arduino': 'arduino::ArduinoStyle', - 'rainbow_dash': 'rainbow_dash::RainbowDashStyle', - 'abap': 'abap::AbapStyle', - 'solarized-dark': 'solarized::SolarizedDarkStyle', - 'solarized-light': 'solarized::SolarizedLightStyle', - 'sas': 'sas::SasStyle', - 'stata': 'stata_light::StataLightStyle', - 'stata-light': 'stata_light::StataLightStyle', - 'stata-dark': 'stata_dark::StataDarkStyle', - 'inkpot': 'inkpot::InkPotStyle', - 'zenburn': 'zenburn::ZenburnStyle', - 'gruvbox-dark': 'gruvbox::GruvboxDarkStyle', - 'gruvbox-light': 'gruvbox::GruvboxLightStyle', - 'dracula': 'dracula::DraculaStyle', - 'one-dark': 'onedark::OneDarkStyle', - 'lilypond' : 'lilypond::LilyPondStyle', -} - - -def get_style_by_name(name): - if name in STYLE_MAP: - mod, cls = STYLE_MAP[name].split('::') - builtin = "yes" - else: - for found_name, style in find_plugin_styles(): - if name == found_name: - return style - # perhaps it got dropped into our styles package - builtin = "" - mod = name - cls = name.title() + "Style" - - try: - mod = __import__('pygments.styles.' + mod, None, None, [cls]) - except ImportError: - raise ClassNotFound("Could not find style module %r" % mod + - (builtin and ", though it should be builtin") + ".") - try: - return getattr(mod, cls) - except AttributeError: - raise ClassNotFound("Could not find style class %r in style module." 
% cls) - - -def get_all_styles(): - """Return a generator for all styles by name, - both builtin and plugin.""" - yield from STYLE_MAP - for name, _ in find_plugin_styles(): - yield name diff --git a/spaces/tmaham/DS-Fusion-Express/README.md b/spaces/tmaham/DS-Fusion-Express/README.md deleted file mode 100644 index 2c201101e02895d07efbf542112279c8c4c7d109..0000000000000000000000000000000000000000 --- a/spaces/tmaham/DS-Fusion-Express/README.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: DS-Fusion -emoji: 🐢 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: true ---- - -This is a demo of our paper DS-Fusion: Artistic Typography via Discriminated and Stylized Diffusion (https://arxiv.org/abs/2303.09604) - -``` -@misc{tanveer2023dsfusion, - title={DS-Fusion: Artistic Typography via Discriminated and Stylized Diffusion}, - author={Maham Tanveer and Yizhi Wang and Ali Mahdavi-Amiri and Hao Zhang}, - year={2023}, - eprint={2303.09604}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` \ No newline at end of file diff --git a/spaces/toiram/goofyai-Leonardo_Ai_Style_Illustration/app.py b/spaces/toiram/goofyai-Leonardo_Ai_Style_Illustration/app.py deleted file mode 100644 index 27bc4e3218de6591df27bb2a2201ef650e09ae93..0000000000000000000000000000000000000000 --- a/spaces/toiram/goofyai-Leonardo_Ai_Style_Illustration/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/goofyai/Leonardo_Ai_Style_Illustration").launch() \ No newline at end of file diff --git a/spaces/tomofi/MMOCR/mmocr/models/kie/extractors/__init__.py b/spaces/tomofi/MMOCR/mmocr/models/kie/extractors/__init__.py deleted file mode 100644 index 914d0f6903cefec1236107346e59901ac9d64fd4..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/kie/extractors/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .sdmgr import SDMGR - -__all__ = ['SDMGR'] diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py deleted file mode 100644 index 4915316027da880f56aa414754099b889aa26e2d..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import itertools - -import torch -from torch.utils.data.sampler import BatchSampler -from torch.utils.data.sampler import Sampler - - -class GroupedBatchSampler(BatchSampler): - """ - Wraps another sampler to yield a mini-batch of indices. - It enforces that elements from the same group should appear in groups of batch_size. - It also tries to provide mini-batches which follows an ordering which is - as close as possible to the ordering from the original sampler. - - Arguments: - sampler (Sampler): Base sampler. - batch_size (int): Size of mini-batch. 
- drop_uneven (bool): If ``True``, the sampler will drop the batches whose - size is less than ``batch_size`` - - """ - - def __init__(self, sampler, group_ids, batch_size, drop_uneven=False): - if not isinstance(sampler, Sampler): - raise ValueError( - "sampler should be an instance of " - "torch.utils.data.Sampler, but got sampler={}".format(sampler) - ) - self.sampler = sampler - self.group_ids = torch.as_tensor(group_ids) - assert self.group_ids.dim() == 1 - self.batch_size = batch_size - self.drop_uneven = drop_uneven - - self.groups = torch.unique(self.group_ids).sort(0)[0] - - self._can_reuse_batches = False - - def _prepare_batches(self): - dataset_size = len(self.group_ids) - # get the sampled indices from the sampler - sampled_ids = torch.as_tensor(list(self.sampler)) - # potentially not all elements of the dataset were sampled - # by the sampler (e.g., DistributedSampler). - # construct a tensor which contains -1 if the element was - # not sampled, and a non-negative number indicating the - # order where the element was sampled. - # for example. if sampled_ids = [3, 1] and dataset_size = 5, - # the order is [-1, 1, -1, 0, -1] - order = torch.full((dataset_size,), -1, dtype=torch.int64) - order[sampled_ids] = torch.arange(len(sampled_ids)) - - # get a mask with the elements that were sampled - mask = order >= 0 - - # find the elements that belong to each individual cluster - clusters = [(self.group_ids == i) & mask for i in self.groups] - # get relative order of the elements inside each cluster - # that follows the order from the sampler - relative_order = [order[cluster] for cluster in clusters] - # with the relative order, find the absolute order in the - # sampled space - permutation_ids = [s[s.sort()[1]] for s in relative_order] - # permute each cluster so that they follow the order from - # the sampler - permuted_clusters = [sampled_ids[idx] for idx in permutation_ids] - - # splits each cluster in batch_size, and merge as a list of tensors - splits = [c.split(self.batch_size) for c in permuted_clusters] - merged = tuple(itertools.chain.from_iterable(splits)) - # now each batch internally has the right order, but - # they are grouped by clusters. Find the permutation between - # different batches that brings them as close as possible to - # the order that we have in the sampler. 
For that, we will consider the - # ordering as coming from the first element of each batch, and sort - # correspondingly - first_element_of_batch = [t[0].item() for t in merged] - # get and inverse mapping from sampled indices and the position where - # they occur (as returned by the sampler) - inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())} - # from the first element in each batch, get a relative ordering - first_index_of_batch = torch.as_tensor( - [inv_sampled_ids_map[s] for s in first_element_of_batch] - ) - - # permute the batches so that they approximately follow the order - # from the sampler - permutation_order = first_index_of_batch.sort(0)[1].tolist() - # finally, permute the batches - batches = [merged[i].tolist() for i in permutation_order] - - if self.drop_uneven: - kept = [] - for batch in batches: - if len(batch) == self.batch_size: - kept.append(batch) - batches = kept - return batches - - def __iter__(self): - if self._can_reuse_batches: - batches = self._batches - self._can_reuse_batches = False - else: - batches = self._prepare_batches() - self._batches = batches - return iter(batches) - - def __len__(self): - if not hasattr(self, "_batches"): - self._batches = self._prepare_batches() - self._can_reuse_batches = True - return len(self._batches) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py deleted file mode 100644 index bd87b9aeb07e05ff94b444ac8999eca3f616711a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py +++ /dev/null @@ -1,154 +0,0 @@ -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import images_to_levels -from ..builder import HEADS -from ..losses import carl_loss, isr_p -from .retina_head import RetinaHead - - -@HEADS.register_module() -class PISARetinaHead(RetinaHead): - """PISA Retinanet Head. - - The head owns the same structure with Retinanet Head, but differs in two - aspects: - 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to - change the positive loss weights. - 2. Classification-aware regression loss is adopted as a third loss. - """ - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). - gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss, regression loss and - carl loss. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - num_imgs = len(img_metas) - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) - for cls_score in cls_scores - ] - flatten_cls_scores = torch.cat( - flatten_cls_scores, dim=1).reshape(-1, - flatten_cls_scores[0].size(-1)) - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_bbox_preds = torch.cat( - flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) - flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) - flatten_label_weights = torch.cat( - label_weights_list, dim=1).reshape(-1) - flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) - flatten_bbox_targets = torch.cat( - bbox_targets_list, dim=1).reshape(-1, 4) - flatten_bbox_weights = torch.cat( - bbox_weights_list, dim=1).reshape(-1, 4) - - # Apply ISR-P - isr_cfg = self.train_cfg.get('isr', None) - if isr_cfg is not None: - all_targets = (flatten_labels, flatten_label_weights, - flatten_bbox_targets, flatten_bbox_weights) - with torch.no_grad(): - all_targets = isr_p( - flatten_cls_scores, - flatten_bbox_preds, - all_targets, - flatten_anchors, - sampling_results_list, - bbox_coder=self.bbox_coder, - loss_cls=self.loss_cls, - num_class=self.num_classes, - **self.train_cfg.isr) - (flatten_labels, flatten_label_weights, flatten_bbox_targets, - flatten_bbox_weights) = all_targets - - # For convenience we compute loss once instead separating by fpn level, - # so that we don't need to separate the weights by level again. 
- # The result should be the same - losses_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - flatten_label_weights, - avg_factor=num_total_samples) - losses_bbox = self.loss_bbox( - flatten_bbox_preds, - flatten_bbox_targets, - flatten_bbox_weights, - avg_factor=num_total_samples) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - # CARL Loss - carl_cfg = self.train_cfg.get('carl', None) - if carl_cfg is not None: - loss_carl = carl_loss( - flatten_cls_scores, - flatten_labels, - flatten_bbox_preds, - flatten_bbox_targets, - self.loss_bbox, - **self.train_cfg.carl, - avg_factor=num_total_pos, - sigmoid=True, - num_class=self.num_classes) - loss_dict.update(loss_carl) - - return loss_dict diff --git a/spaces/triple-t/ttt-space/static/_app/immutable/chunks/2-5e47ff79.js b/spaces/triple-t/ttt-space/static/_app/immutable/chunks/2-5e47ff79.js deleted file mode 100644 index e88a391d48a070695f5faa16da18120c0df5b65e..0000000000000000000000000000000000000000 --- a/spaces/triple-t/ttt-space/static/_app/immutable/chunks/2-5e47ff79.js +++ /dev/null @@ -1 +0,0 @@ -import{_ as r}from"./_page-da46b06b.js";import{default as t}from"../components/pages/_page.svelte-033df9bc.js";export{t as component,r as universal}; diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/tissue_tool.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/tissue_tool.py deleted file mode 100644 index 2035e4463a880dc673b01d8900d6a73f981d1912..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/tissue_tool.py +++ /dev/null @@ -1,157 +0,0 @@ -''' -从病理图中获得主要组织区域的轮廓。 -''' - -import cv2 -import numpy as np -try: - from .contour_tool import find_contours, draw_contours, resize_contours - from .opsl_im_tool import make_thumb_any_level -except (ModuleNotFoundError, ImportError): - from contour_tool import find_contours, draw_contours, resize_contours - from opsl_im_tool import make_thumb_any_level - - -def get_tissue_contours_default_postprocess(tim: np.ndarray): - ''' - 默认的轮廓布尔图后处理函数 - :param tim: - :return: - ''' - k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) - cv2.dilate(tim, k, dst=tim, iterations=2) - cv2.erode(tim, k, dst=tim, iterations=4) - cv2.dilate(tim, k, dst=tim, iterations=2) - return tim - - -def get_tissue_contours(im, - gray_thresh=210, - channel_diff_thresh=18, - area_thresh=0.005, - *, - postprocess_func=get_tissue_contours_default_postprocess, - debug_show=False): - """ - 从一张图像中获得组织轮廓 - :param im: opsl缩略图,要求为RGB的nd.ndarray类型 - :param gray_thresh: 灰度差异,像素的任意通道值小于该值,该像素认为是组织 - :param channel_diff_thresh: 通道差异,像素的全部通道的最大值减最小值得到通道差异若大于该值,该像素认为是组织 - :param area_thresh: 面积阈值,组织区域占全图百分比若大于等于该值,则保留该组织区域 - :param postprocess_func: 组织布尔图的后处理函数 - :param debug_show: 是否显示调试数据 - :return: tissue_contours - """ - assert isinstance(im, np.ndarray) - assert gray_thresh is not None or channel_diff_thresh is not None, 'Error! 
Only one can be None between gray_thresh and channel_diff_thresh' - imhw = im.shape[:2] - - if gray_thresh is None: - tim1 = np.ones(imhw, dtype=np.uint8) - else: - tim1 = np.any(im <= gray_thresh, 2).astype(np.uint8) - - if channel_diff_thresh is None: - tim2 = np.ones(imhw, dtype=np.uint8) - else: - tim2 = (np.max(im, 2) - np.min(im, 2) > channel_diff_thresh).astype(np.uint8) - - tim = tim1 * tim2 - - # gim = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY) - # tim = (gim < gray_thresh).astype(np.uint8) - - if debug_show: - cv2.imshow('get_tissue_ori', im[..., ::-1]) - # cv2.imshow('get_tissue_gim', gim) - cv2.imshow('get_tissue_mult', tim*255) - cv2.imshow('get_tissue_gray', tim1*255) - cv2.imshow('get_tissue_channel_diff', tim2*255) - - tim = postprocess_func(tim) - - if debug_show: - cv2.imshow('get_tissue_postprocess', tim*255) - - contours = find_contours(tim, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - tissue_contours = [] - - if area_thresh is None: - tissue_contours = contours - else: - for i, cont in enumerate(contours): - contour_area = cv2.contourArea(cont) - factor = float(contour_area) / np.prod(im.shape[:2], dtype=np.float) - if debug_show: - print(contour_area, '{:.3f}'.format(factor)) - if factor > area_thresh: - tissue_contours.append(cont) - - if debug_show: - mask = np.zeros([im.shape[0], im.shape[1], 1], dtype=np.uint8) - mask = draw_contours(mask, tissue_contours, [1], thickness=-1) - cv2.imshow('mask', mask*255) - - return tissue_contours - - -def get_tissue_contours_with_big_pic(opsl_im, - gray_thresh=210, - channel_diff_thresh=18, - area_thresh=0.005, - *, - postprocess_func=get_tissue_contours_default_postprocess, - thumb_size=768, - debug_show=False): - ''' - 对大图获得组织轮廓,默认参数即可对HE图像工作良好 - 灰度差异与通道差异与面积阈值 是与关系 - 可以单独设定 gray_thresh 或 channel_diff_thresh 或 area_thresh 为 None,代表关闭对应过滤 - gray_thresh 和 channel_diff_thresh 必须至少有一个不为None - :param opsl_im: OpenSlide或兼容图像类型 - :param gray_thresh: 灰度差异,像素的任意通道值小于该值,该像素认为是组织 - :param channel_diff_thresh: 通道差异,像素的全部通道的最大值减最小值得到通道差异若大于该值,该像素认为是组织 - :param area_thresh: 面积阈值,组织区域占全图百分比若大于等于该值,则保留该组织区域 - :param postprocess_func: 组织布尔图的后处理函数 - :param thumb_size: 计算大图轮廓时,取的缩略图的最长边的大小 - :param debug_show: 设定为True可以启动调试功能 - :return: - ''' - thumb = make_thumb_any_level(opsl_im, thumb_size=thumb_size) - tissue_contours = get_tissue_contours(thumb, gray_thresh=gray_thresh, channel_diff_thresh=channel_diff_thresh, area_thresh=area_thresh, - postprocess_func=postprocess_func, debug_show=debug_show) - thumb_hw = thumb.shape[:2] - factor_hw = np.array(opsl_im.level_dimensions[0][::-1], dtype=np.float32) / thumb_hw - resized_contours = resize_contours(tissue_contours, factor_hw) - return resized_contours - - -def get_custom_contours_with_big_pic(opsl_im, thumb_size, get_contours_func, **get_contours_func_kwargs): - ''' - 对大图获得组织轮廓,使用自定义函数 - :param opsl_im: OpenSlide或兼容图像类型 - :param get_contours_func: 自定义获取轮廓的函数 - :param get_contours_func_kwargs: 自定义获取轮廓的函数的参数 - :return: - ''' - thumb = make_thumb_any_level(opsl_im, thumb_size=thumb_size) - tissue_contours = get_contours_func(thumb, **get_contours_func_kwargs) - thumb_hw = thumb.shape[:2] - factor_hw = np.array(opsl_im.level_dimensions[0][::-1], dtype=np.float32) / thumb_hw - resized_contours = resize_contours(tissue_contours, factor_hw) - return resized_contours - - -if __name__ == '__main__': - import glob - import os - if os.name == 'nt': - openslide_bin = os.path.split(__file__)[0]+'/../bin_openslide_x64_20171122' - if os.path.isdir(openslide_bin): - os.add_dll_directory(openslide_bin) - import 
openslide as opsl - - for im_path in glob.glob('dataset/ims/*.ndpi', recursive=True): - im = opsl.OpenSlide(im_path) - get_tissue_contours_with_big_pic(im, gray_thresh=210, channel_diff_thresh=4, area_thresh=0.001, thumb_size=1024, debug_show=True) - cv2.waitKey(0) diff --git a/spaces/ubamba98/clipsimilarimagesearch/README.md b/spaces/ubamba98/clipsimilarimagesearch/README.md deleted file mode 100644 index 4864b5ecd10969b9b7f73b71d13bb8d890808796..0000000000000000000000000000000000000000 --- a/spaces/ubamba98/clipsimilarimagesearch/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Clipsimilarimagesearch -emoji: 💩 -colorFrom: purple -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Autosync for Google Drive v4.4.4 [Ultimate] APK Free Download How It Works and How to Use It.md b/spaces/usbethFlerru/sovits-modelsV2/example/Autosync for Google Drive v4.4.4 [Ultimate] APK Free Download How It Works and How to Use It.md deleted file mode 100644 index 2569211b6c01ec90523146dc324f8b22ad4ca262..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Autosync for Google Drive v4.4.4 [Ultimate] APK Free Download How It Works and How to Use It.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Autosync for Google Drive v4.4.4 [Ultimate] APK Free Download


    Download ››››› https://urlcod.com/2uyUWb



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bluesoleil 10 Crack Serial 20 Finger Geburtstagsgr.md b/spaces/usbethFlerru/sovits-modelsV2/example/Bluesoleil 10 Crack Serial 20 Finger Geburtstagsgr.md deleted file mode 100644 index d9d319b97fb3a8cd8c705acf3a9adf07d11e94be..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Bluesoleil 10 Crack Serial 20 Finger Geburtstagsgr.md +++ /dev/null @@ -1,117 +0,0 @@ - -

    Bluesoleil 10 Crack Serial 20 finger geburtstagsgr: How to Download and Activate the Ultimate Bluetooth Solution for Windows

    - -

    Bluetooth is a wireless technology that allows you to connect various devices such as mobile phones, headsets, keyboards, mice, printers, and more to your computer. However, not all Bluetooth applications are created equal. Some are outdated, buggy, or incompatible with your system. If you are looking for a reliable, powerful, and easy-to-use Bluetooth software for Windows, you should try Bluesoleil 10.

    -

    Bluesoleil 10 Crack Serial 20 finger geburtstagsgr


    Downloadhttps://urlcod.com/2uyXoy



    - -

    Bluesoleil 10 is one of the most popular and professional Windows-based Bluetooth applications, developed by IVT with great innovation and independence. It supports Bluetooth 4.0 low energy technology and is compatible with Windows 8/8.1/Windows 10. It has improved functions and optimized performance compared to previous versions. It can fulfill the demands of integrating a diverse array of Bluetooth enabled digital devices, such as mobile phones, headsets, printers, keyboards, and so on. You can also establish networks and exchange data with other Bluetooth enabled computers.

    - -

    However, Bluesoleil 10 is not a free software. You need to purchase a license key to activate it and enjoy its full features. But what if you don't want to spend money on it? Is there a way to get Bluesoleil 10 crack serial 20 finger geburtstagsgr for free? The answer is yes. In this article, we will show you how to download and activate Bluesoleil 10 crack serial 20 finger geburtstagsgr without paying anything.

    - -

    How to Download Bluesoleil 10 Crack Serial 20 finger geburtstagsgr

    - -

    The first step is to download Bluesoleil 10 crack serial 20 finger geburtstagsgr from a reliable source. There are many websites that claim to offer Bluesoleil 10 crack serial 20 finger geburtstagsgr for free, but some of them may contain viruses, malware, or adware that can harm your computer. Therefore, you need to be careful and choose a trusted site.

    - -

    One of the best sites that we recommend is freeprosoftz.com. This site provides cracked software, games, nulled scripts, free premium WordPress themes and plugins, and more. It also offers Bluesoleil 10 crack serial 20 finger geburtstagsgr for free download. You can find it by searching for "IVT BlueSoleil 10.0.498.0 Crack 2023 With Activation Key [Latest]" on the site.

    - -

    Once you find it, click on the "Download Now" button and follow the instructions to download the file. The file size is about 140 MB and it contains the setup file and the crack file. You will need to extract the file using WinRAR or any other file compression tool.

    -

    - -

    How to Activate Bluesoleil 10 Crack Serial 20 finger geburtstagsgr

    - -

    The next step is to install and activate Bluesoleil 10 crack serial 20 finger geburtstagsgr on your computer. Here are the steps to follow:

    - -
      -
    1. Run the setup file and follow the installation wizard to install Bluesoleil 10 on your computer.
    2. -
    3. Do not launch the program after installation.
    4. -
    5. Copy the crack file from the downloaded folder and paste it into the installation directory of Bluesoleil 10. The default location is C:\Program Files (x86)\IVT Corporation\BlueSoleil.
    6. -
    7. Replace the original file if prompted.
    8. -
    9. Run the program as administrator.
    10. -
    11. Enter any serial number when asked for activation. You can use any random number or use one of these:
        -
      • 9F9E-4D59-BC6C-37A9-37EA
      • -
      • B0B7-5FC5-3333-5F0A-0A0A
      • -
      • C0C0-1F1F-1111-7F0A-0A0A
      • -
    12. -
    13. Click on "Activate" and wait for a few seconds.
    14. -
    15. You have successfully activated Bluesoleil 10 crack serial 20 finger geburtstagsgr for free.
    16. -
    - -

    Congratulations! You can now enjoy the full features of Bluesoleil 10 crack serial 20 finger geburtstagsgr on your computer. You can connect all your Bluetooth devices in one screen, transfer files, make calls, send messages, sync contacts, play music, and more with ease.

    - -

    Conclusion

    - -

    Bluesoleil 10 is a great Bluetooth software for Windows that can help you integrate various Bluetooth devices with your computer. However, it is not a free software and you need to purchase a license key to activate it. If you don't want to spend money on it, you can use Bluesoleil 10 crack serial 20 finger geburtstagsgr to get it for free.

    - -

    In this article, we have shown you how to download and activate Bluesoleil 10 crack serial 20 finger geburtstagsgr without paying anything. We have also provided you with some serial numbers that you can use for activation. However, we do not encourage or support piracy or illegal use of software. This article is for educational purposes only and we are not responsible for any consequences that may arise from using Bluesoleil 10 crack serial 20 finger geburtstagsgr.

    - -

    If you like Bluesoleil 10 and find it useful, we suggest you buy a genuine license key from the official website of IVT Corporation. This way, you can support the developers and get regular updates and technical support for your software.

    - -

    We hope this article has helped you learn how to download and activate Bluesoleil 10 crack serial 20 finger geburtstagsgr for free. If you have any questions or suggestions, feel free to leave a comment below.

    -

    Why You Should Use Bluesoleil 10 Crack Serial 20 finger geburtstagsgr

    - -

    There are many reasons why you should use Bluesoleil 10 crack serial 20 finger geburtstagsgr instead of other Bluetooth software. Here are some of them:

    - -
      -
    • It is compatible with most Bluetooth devices and supports 24 different Bluetooth functions, such as Find Me, Proximity, Health Thermometer, Heart Rate, HID OVER GATT, and more.
    • It has a user-friendly interface that allows you to manage all your Bluetooth devices in one screen. You can easily pair, connect, disconnect, send, receive, and browse files with your Bluetooth devices.
    • It offers high-quality sound transmission for both A2DP and SCO profiles. You can enjoy wireless music and voice calls with your Bluetooth headsets or speakers.
    • It has a multi-profile feature that allows you to use multiple Bluetooth devices simultaneously. You can switch between different devices without any hassle.
    • It has a backup and restore feature that allows you to back up and restore contacts and messages from your Bluetooth-enabled mobile phones to your computer. You can also view, edit, delete, and upload contacts on your computer.
    - -

    These are just some of the benefits of using Bluesoleil 10 crack serial 20 finger geburtstagsgr. There are many more features that you can discover by yourself once you download and activate it for free.

    - -

    How to Use Bluesoleil 10 Crack Serial 20 finger geburtstagsgr

    - -

    Using Bluesoleil 10 crack serial 20 finger geburtstagsgr is very easy and simple. Here are some steps to guide you:

    - -
      -
    1. Make sure your Bluetooth device is turned on and discoverable.
    2. Launch Bluesoleil 10 on your computer and wait for it to detect your Bluetooth device.
    3. Double-click on your Bluetooth device icon to pair it with your computer. Enter the same PIN code on both devices if required.
    4. Once paired, you can see the services that your Bluetooth device supports on the right side of the screen. You can also see the status of your Bluetooth device on the bottom of the screen.
    5. To use a service, simply drag and drop the service icon onto your Bluetooth device icon. For example, if you want to send a file to your mobile phone, drag and drop the File Transfer icon onto your mobile phone icon.
    6. A dialog box will appear where you can select the file that you want to send and click on "Send". You can also browse the files on your mobile phone by clicking on "Browse Device".
    7. To disconnect a service, right-click on your Bluetooth device icon and select "Disconnect". To unpair a device, right-click on your Bluetooth device icon and select "Unpair".
    - -

    That's it! You can now use Bluesoleil 10 crack serial 20 finger geburtstagsgr to connect all your Bluetooth devices with your computer and enjoy wireless communication and entertainment.

    -

    What are the Risks of Using Bluesoleil 10 Crack Serial 20 finger geburtstagsgr

    - -

    While using Bluesoleil 10 crack serial 20 finger geburtstagsgr may seem tempting and convenient, it is not without risks. There are some drawbacks and dangers that you should be aware of before using it. Here are some of them:

    - -
      -
    • It is illegal and unethical. Using Bluesoleil 10 crack serial 20 finger geburtstagsgr is a violation of the intellectual property rights of IVT Corporation, the developer of Bluesoleil 10. You are stealing their software and depriving them of their rightful income. This is not only unfair, but also punishable by law. You may face legal consequences such as fines, lawsuits, or even jail time if you are caught using Bluesoleil 10 crack serial 20 finger geburtstagsgr.
    • It is unsafe and unreliable. Using Bluesoleil 10 crack serial 20 finger geburtstagsgr may expose your computer to viruses, malware, or adware that can damage your system or compromise your data. You may also encounter errors, bugs, or crashes that can affect your Bluetooth performance or cause data loss. You may not be able to use some features or services that require online verification or registration. You may also lose your warranty or technical support from IVT Corporation if you use Bluesoleil 10 crack serial 20 finger geburtstagsgr.
    • It is outdated and unsupported. Using Bluesoleil 10 crack serial 20 finger geburtstagsgr means that you are using an old version of Bluesoleil 10 that may not be compatible with the latest Bluetooth devices or Windows updates. You may miss out on the new features, improvements, or bug fixes that IVT Corporation releases for Bluesoleil 10. You may also not be able to update your software or get help from the official website or customer service if you use Bluesoleil 10 crack serial 20 finger geburtstagsgr.
    - -

    These are just some of the risks of using Bluesoleil 10 crack serial 20 finger geburtstagsgr. There may be more that you are not aware of. Therefore, we advise you to avoid using Bluesoleil 10 crack serial 20 finger geburtstagsgr and opt for a legal and safe way to use Bluesoleil 10.

    - -

    How to Get a Genuine License Key for Bluesoleil 10

    - -

    The best way to use Bluesoleil 10 is to get a genuine license key from the official website of IVT Corporation. This way, you can enjoy all the benefits of Bluesoleil 10 without any risks or limitations. Here are some steps to get a genuine license key for Bluesoleil 10:

    - -
      -
    1. Go to the official website of IVT Corporation at https://www.bluesoleil.com/.
    2. Click on the "Buy Now" button on the top right corner of the screen.
    3. Select the edition of Bluesoleil 10 that suits your needs and click on "Add to Cart". You can choose between Standard Edition ($27.99) or Professional Edition ($42.99).
    4. Enter your billing information and payment method and click on "Place Order". You can pay by credit card, PayPal, or bank transfer.
    5. Check your email for the confirmation and invoice of your order. You will also receive your license key and download link for Bluesoleil 10.
    6. Download and install Bluesoleil 10 on your computer using the download link provided.
    7. Launch Bluesoleil 10 and enter your license key when asked for activation.
    8. You have successfully activated Bluesoleil 10 with a genuine license key.
    - -

    Congratulations! You can now use Bluesoleil 10 legally and safely on your computer. You can enjoy all the features and functions of Bluesoleil 10 without any worries. You can also get regular updates and technical support from IVT Corporation if you have any issues or questions.

    -

    Conclusion

    - -

    Bluesoleil 10 is a great Bluetooth software for Windows that can help you integrate various Bluetooth devices with your computer and enjoy wireless communication and entertainment. However, it is not a free software and you need to purchase a license key to activate it and enjoy its full features.

    - -

    If you don't want to spend money on it, you may be tempted to use Bluesoleil 10 crack serial 20 finger geburtstagsgr to get it for free. However, this is not a good idea as it comes with many risks and drawbacks. You may face legal issues, security threats, performance problems, or compatibility issues if you use Bluesoleil 10 crack serial 20 finger geburtstagsgr.

    - -

    Therefore, we recommend you to avoid using Bluesoleil 10 crack serial 20 finger geburtstagsgr and opt for a legal and safe way to use Bluesoleil 10. You can get a genuine license key from the official website of IVT Corporation and activate Bluesoleil 10 with ease. This way, you can support the developers and get the best Bluetooth experience on your computer.

    - -

    We hope this article has helped you learn how to download and activate Bluesoleil 10 crack serial 20 finger geburtstagsgr for free and why you should not use it. If you have any questions or suggestions, feel free to leave a comment below.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/vrajeshbhatt/Automated-Ticket-Management-System/static/css/bootstrap/utilities/_stretched-link.css b/spaces/vrajeshbhatt/Automated-Ticket-Management-System/static/css/bootstrap/utilities/_stretched-link.css deleted file mode 100644 index 20eb7dc16f6af79698729f8bb513ac61316a5abb..0000000000000000000000000000000000000000 --- a/spaces/vrajeshbhatt/Automated-Ticket-Management-System/static/css/bootstrap/utilities/_stretched-link.css +++ /dev/null @@ -1,10 +0,0 @@ -.stretched-link::after { - position: absolute; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1; - pointer-events: auto; - content: ""; - background-color: rgba(0, 0, 0, 0); } diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/utils/inverted_residual.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/utils/inverted_residual.py deleted file mode 100644 index 53b8fcd41f71d814738f1ac3f5acd3c3d701bf96..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/utils/inverted_residual.py +++ /dev/null @@ -1,208 +0,0 @@ -from annotator.uniformer.mmcv.cnn import ConvModule -from torch import nn -from torch.utils import checkpoint as cp - -from .se_layer import SELayer - - -class InvertedResidual(nn.Module): - """InvertedResidual block for MobileNetV2. - - Args: - in_channels (int): The input channels of the InvertedResidual block. - out_channels (int): The output channels of the InvertedResidual block. - stride (int): Stride of the middle (first) 3x3 convolution. - expand_ratio (int): Adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - dilation (int): Dilation rate of depthwise conv. Default: 1 - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - stride, - expand_ratio, - dilation=1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - with_cp=False): - super(InvertedResidual, self).__init__() - self.stride = stride - assert stride in [1, 2], f'stride must in [1, 2]. ' \ - f'But received {stride}.' 
- self.with_cp = with_cp - self.use_res_connect = self.stride == 1 and in_channels == out_channels - hidden_dim = int(round(in_channels * expand_ratio)) - - layers = [] - if expand_ratio != 1: - layers.append( - ConvModule( - in_channels=in_channels, - out_channels=hidden_dim, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - layers.extend([ - ConvModule( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - groups=hidden_dim, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - in_channels=hidden_dim, - out_channels=out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - ]) - self.conv = nn.Sequential(*layers) - - def forward(self, x): - - def _inner_forward(x): - if self.use_res_connect: - return x + self.conv(x) - else: - return self.conv(x) - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class InvertedResidualV3(nn.Module): - """Inverted Residual Block for MobileNetV3. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. - stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. 
- """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False): - super(InvertedResidualV3, self).__init__() - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2] - self.with_cp = with_cp - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=mid_channels, - conv_cfg=dict( - type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + out - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out diff --git a/spaces/w1zrd/MusicGen/audiocraft/modules/rope.py b/spaces/w1zrd/MusicGen/audiocraft/modules/rope.py deleted file mode 100644 index 4b8c70b9aba28eeb53d12ddc3de8852492847808..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/audiocraft/modules/rope.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -from torch import nn -import torch - - -class XPos(nn.Module): - """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1). - This applies an exponential decay to the RoPE rotation matrix. - - Args: - dim (int): Embedding dimension. - smoothing (float): Smoothing factor applied to the decay rates. - base_scale (int): Base decay rate, given in terms of scaling time. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. 
- """ - def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512, - device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - self.base_scale = base_scale - - half_dim = dim // 2 - adim = torch.arange(half_dim, device=device, dtype=dtype) - decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing) - self.register_buffer("decay_rates", decay_rates) - self.decay: tp.Optional[torch.Tensor] = None - - def get_decay(self, start: int, end: int): - """Create complex decay tensor, cache values for fast computation. - """ - if self.decay is None or end > self.decay.shape[0]: - assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype) - power = idx / self.base_scale - scale = self.decay_rates ** power.unsqueeze(-1) - self.decay = torch.polar(scale, torch.zeros_like(scale)) - return self.decay[start:end] # [T, C/2] - - -class RotaryEmbedding(nn.Module): - """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864). - - Args: - dim (int): Embedding dimension (twice the number of frequencies). - max_period (float): Maximum period of the rotation frequencies. - xpos (bool): Use xPos, applies an exponential decay to rotation matrix. - scale (float): Scale of positional embedding, set to 0 to deactivate. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. - """ - def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False, - scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - self.scale = scale - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - - adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)] - frequencies = 1.0 / (max_period ** (adim / dim)) - self.register_buffer("frequencies", frequencies) - self.rotation: tp.Optional[torch.Tensor] = None - - self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None - - def get_rotation(self, start: int, end: int): - """Create complex rotation tensor, cache values for fast computation. - """ - if self.rotation is None or end > self.rotation.shape[0]: - assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype) - angles = torch.outer(idx, self.frequencies) - self.rotation = torch.polar(torch.ones_like(angles), angles) - return self.rotation[start:end] - - def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False): - """Apply rope rotation to query or key tensor. - """ - T = x.shape[1] - rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2) - - if self.xpos: - decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2) - else: - decay = 1.0 - - if invert_decay: - decay = decay ** -1 - - x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2)) - scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale) - x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2) - - return x_out.type_as(x) - - def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0): - """ Apply rope rotation to both query and key tensors. - Supports streaming mode, in which query and key are not expected to have the same shape. 
- In streaming mode, key will be of legnth [P + C] with P the cached past timesteps, but - query will be [C] (typically C == 1). - - Args: - query (torch.Tensor): Query to rotate. - key (torch.Tensor): Key to rotate. - start (int): Start index of the sequence for time offset. - """ - query_timesteps = query.shape[1] - key_timesteps = key.shape[1] - streaming_offset = key_timesteps - query_timesteps - - query_out = self.rotate(query, start + streaming_offset) - key_out = self.rotate(key, start, invert_decay=True) - - return query_out, key_out diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/actions/analyze_dep_libs.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/actions/analyze_dep_libs.py deleted file mode 100644 index 23c35cdf80ae5080b8482d2e5f3c82dd501c1a0a..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/actions/analyze_dep_libs.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/19 12:01 -@Author : alexanderwu -@File : analyze_dep_libs.py -""" - -from metagpt.actions import Action - -PROMPT = """You are an AI developer, trying to write a program that generates code for users based on their intentions. - -For the user's prompt: - ---- -The API is: {prompt} ---- - -We decide the generated files are: {filepaths_string} - -Now that we have a file list, we need to understand the shared dependencies they have. -Please list and briefly describe the shared contents between the files we are generating, including exported variables, -data patterns, id names of all DOM elements that javascript functions will use, message names and function names. -Focus only on the names of shared dependencies, do not add any other explanations. -""" - - -class AnalyzeDepLibs(Action): - def __init__(self, name, context=None, llm=None): - super().__init__(name, context, llm) - self.desc = "根据上下文,分析程序运行依赖库" - - async def run(self, requirement, filepaths_string): - # prompt = f"以下是产品需求文档(PRD):\n\n{prd}\n\n{PROMPT}" - prompt = PROMPT.format(prompt=requirement, filepaths_string=filepaths_string) - design_filenames = await self._aask(prompt) - return design_filenames diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/static/assets/login-bb708d78.js b/spaces/wffcyrus/MetaGPT-v1/metagpt/static/assets/login-bb708d78.js deleted file mode 100644 index 1fd761f98a91d02771b69c80e06fe5442978d9ca..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/static/assets/login-bb708d78.js +++ /dev/null @@ -1 +0,0 @@ -import{d as H,f as v,h as w,N as U,a1 as b,r as _,a as x,c as S,o as F,l,v as s,q as t,$ as e,u as i,t as n,a2 as T}from"./vue-e0bc46a9.js";import{t as h,u as R,P as V,l as q}from"./index-054e9309.js";import{F as f,I as L,B as G,g as N,h as E,L as g,i as z}from"./vendor-4cd7d240.js";import"./__commonjsHelpers__-042e6b4d.js";const A={width:"111",height:"23",viewBox:"0 0 111 23",fill:"none",xmlns:"http://www.w3.org/2000/svg"},I=U('',5),O=[I],P=H({__name:"LogoWhite",setup(k){return(p,c)=>(v(),w("svg",A,O))}}),W={class:"login"},D={class:"loginbg"},j=l("div",{class:"title"},"MetaGPT",-1),J={class:"loginform"},K={style:{width:"418px"}},Q={class:"formTitle"},X={class:"toolBoxBtn"},Y={class:"desc"},o1=H({__name:"login",setup(k){const p=b(),c=_(),C=x({username:"",password:""}),d=_(!1),M=S(()=>({username:[{required:!0,message:h("请输入用户名")}],password:[{required:!0,message:h("请输入密码")}]})),{setToken:y,getToken:Z}=R(),u=_(!1),$=async()=>{var m;if(u.value||await((m=c.value)==null?void 
0:m.validate()))return;u.value=!0;const{isRequestSuccess:o,showRequestErrorMessage:r,data:B}=await q({account:C.username,password:C.password});if(u.value=!1,!o){r();return}y(B.token),p.replace({name:V.home})};return F(()=>{Z()&&p.replace({name:V.home})}),(a,o)=>(v(),w("div",W,[l("div",D,[s(P,{class:"logiWhite"}),j]),l("div",J,[l("div",K,[s(e(z),{ref_key:"formRef",ref:c,model:e(C),rules:e(M)},{default:t(()=>[s(e(f),{"hide-label":""},{default:t(()=>[l("div",Q,i(a.$t("账号登录")),1)]),_:1}),s(e(f),{"hide-label":"",field:"username"},{default:t(()=>[s(e(L),{modelValue:e(C).username,"onUpdate:modelValue":o[0]||(o[0]=r=>e(C).username=r),placeholder:a.$t("用户名/邮箱"),size:"large"},null,8,["modelValue","placeholder"])]),_:1}),s(e(f),{"hide-label":"",field:"password"},{default:t(()=>[s(e(L),{modelValue:e(C).password,"onUpdate:modelValue":o[1]||(o[1]=r=>e(C).password=r),type:"password",placeholder:a.$t("密码"),size:"large"},null,8,["modelValue","placeholder"])]),_:1}),s(e(G),{type:"primary",long:"",size:"large",disabled:!e(d),loading:e(u),onClick:$},{default:t(()=>[n(i(a.$t("登录")),1)]),_:1},8,["disabled","loading"]),s(e(N),{class:"spaceBTW toolBox",fill:""},{default:t(()=>[l("span",X,i(a.$t("忘记密码")),1)]),_:1}),l("div",null,[s(e(E),{modelValue:e(d),"onUpdate:modelValue":o[2]||(o[2]=r=>T(d)?d.value=r:null)},{default:t(()=>[l("div",Y,[n(i(a.$t("登录或完成注册即代表你同意")),1),s(e(g),null,{default:t(()=>[n(i(a.$t("用户协议")),1)]),_:1}),n(" "+i(a.$t("和"))+" ",1),s(e(g),null,{default:t(()=>[n(i(a.$t("隐私政策")),1)]),_:1})])]),_:1},8,["modelValue"])])]),_:1},8,["model","rules"])])])]))}});export{o1 as default}; diff --git a/spaces/wuhuik/bingo/src/components/chat-list.tsx b/spaces/wuhuik/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
    - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
    - ) -} diff --git a/spaces/wy213/213a/src/components/chat-list.tsx b/spaces/wy213/213a/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/wy213/213a/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
    - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
    - ) -} diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/reid_multibackend.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/reid_multibackend.py deleted file mode 100644 index 8439971e9b5345d803a2695056b536baee9063ad..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/reid_multibackend.py +++ /dev/null @@ -1,237 +0,0 @@ -import torch.nn as nn -import torch -from pathlib import Path -import numpy as np -from itertools import islice -import torchvision.transforms as transforms -import cv2 -import sys -import torchvision.transforms as T -from collections import OrderedDict, namedtuple -import gdown -from os.path import exists as file_exists - -from yolov5.utils.general import LOGGER, check_version, check_requirements -from trackers.strong_sort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name, - download_url, load_pretrained_weights) -from trackers.strong_sort.deep.models import build_model - - -def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): - # Check file(s) for acceptable suffix - if file and suffix: - if isinstance(suffix, str): - suffix = [suffix] - for f in file if isinstance(file, (list, tuple)) else [file]: - s = Path(f).suffix.lower() # file suffix - if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" - - -class ReIDDetectMultiBackend(nn.Module): - # ReID models MultiBackend class for python inference on various backends - def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False): - super().__init__() - - w = weights[0] if isinstance(weights, list) else weights - self.pt, self.jit, self.onnx, self.xml, self.engine, self.coreml, self.saved_model, \ - self.pb, self.tflite, self.edgetpu, self.tfjs, self.paddle = self.model_type(w) # get backend - self.fp16 = fp16 - self.fp16 &= self.pt or self.jit or self.engine # FP16 - - # Build transform functions - self.device = device - self.image_size=(256, 128) - self.pixel_mean=[0.485, 0.456, 0.406] - self.pixel_std=[0.229, 0.224, 0.225] - self.transforms = [] - self.transforms += [T.Resize(self.image_size)] - self.transforms += [T.ToTensor()] - self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)] - self.preprocess = T.Compose(self.transforms) - self.to_pil = T.ToPILImage() - - model_name = get_model_name(w) - - if w.suffix == '.pt': - model_url = get_model_url(w) - if not file_exists(w) and model_url is not None: - gdown.download(model_url, str(w), quiet=False) - elif file_exists(w): - pass - else: - print(f'No URL associated to the chosen StrongSORT weights ({w}). 
Choose between:') - show_downloadeable_models() - exit() - - # Build model - self.model = build_model( - model_name, - num_classes=1, - pretrained=not (w and w.is_file()), - use_gpu=device - ) - - if self.pt: # PyTorch - # populate model arch with weights - if w and w.is_file() and w.suffix == '.pt': - load_pretrained_weights(self.model, w) - - self.model.to(device).eval() - self.model.half() if self.fp16 else self.model.float() - elif self.jit: - LOGGER.info(f'Loading {w} for TorchScript inference...') - self.model = torch.jit.load(w) - self.model.half() if self.fp16 else self.model.float() - elif self.onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - cuda = torch.cuda.is_available() and device.type != 'cpu' - #check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) - import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] - self.session = onnxruntime.InferenceSession(str(w), providers=providers) - elif self.engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') - import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - if device.type == 'cpu': - device = torch.device('cuda:0') - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - logger = trt.Logger(trt.Logger.INFO) - with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - self.model_ = runtime.deserialize_cuda_engine(f.read()) - self.context = self.model_.create_execution_context() - self.bindings = OrderedDict() - self.fp16 = False # default updated below - dynamic = False - for index in range(self.model_.num_bindings): - name = self.model_.get_binding_name(index) - dtype = trt.nptype(self.model_.get_binding_dtype(index)) - if self.model_.binding_is_input(index): - if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic - dynamic = True - self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2])) - if dtype == np.float16: - self.fp16 = True - shape = tuple(self.context.get_binding_shape(index)) - im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) - self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) - self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) - batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size - elif self.xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - from openvino.runtime import Core, Layout, get_batch - ie = Core() - if not Path(w).is_file(): # if not *.xml - w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) - if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCWH")) - batch_dim = get_batch(network) - if batch_dim.is_static: - batch_size = batch_dim.get_length() - self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 - self.output_layer = next(iter(self.executable_network.outputs)) - - elif self.tflite: - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter 
import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, - self.interpreter = tf.lite.Interpreter(model_path=w) - self.interpreter.allocate_tensors() - # Get input and output tensors. - self.input_details = self.interpreter.get_input_details() - self.output_details = self.interpreter.get_output_details() - - # Test model on random input data. - input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32) - self.interpreter.set_tensor(self.input_details[0]['index'], input_data) - - self.interpreter.invoke() - - # The function `get_tensor()` returns a copy of the tensor data. - output_data = self.interpreter.get_tensor(self.output_details[0]['index']) - else: - print('This model framework is not supported yet!') - exit() - - - @staticmethod - def model_type(p='path/to/model.pt'): - # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx - from export import export_formats - sf = list(export_formats().Suffix) # export suffixes - check_suffix(p, sf) # checks - types = [s in Path(p).name for s in sf] - types[8] &= not types[9] # tflite &= not edgetpu - return types - - def _preprocess(self, im_batch): - - images = [] - for element in im_batch: - image = self.to_pil(element) - image = self.preprocess(image) - images.append(image) - - images = torch.stack(images, dim=0) - images = images.to(self.device) - - return images - - - def forward(self, im_batch): - - # preprocess batch - im_batch = self._preprocess(im_batch) - - # batch to half - if self.fp16 and im_batch.dtype != torch.float16: - im_batch = im_batch.half() - - # batch processing - features = [] - if self.pt: - features = self.model(im_batch) - elif self.jit: # TorchScript - features = self.model(im_batch) - elif self.onnx: # ONNX Runtime - im_batch = im_batch.cpu().numpy() # torch to numpy - features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0] - elif self.engine: # TensorRT - if True and im_batch.shape != self.bindings['images'].shape: - i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output')) - self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic - self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape) - self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) - s = self.bindings['images'].shape - assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" - self.binding_addrs['images'] = int(im_batch.data_ptr()) - self.context.execute_v2(list(self.binding_addrs.values())) - features = self.bindings['output'].data - elif self.xml: # OpenVINO - im_batch = im_batch.cpu().numpy() # FP32 - features = self.executable_network([im_batch])[self.output_layer] - else: - print('Framework not supported at the moment, we are working on it...') - exit() - - if isinstance(features, (list, tuple)): - return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features] - else: - return self.from_numpy(features) - - def from_numpy(self, x): - return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x - - def warmup(self, imgsz=[(256, 128, 3)]): - # Warmup model by running inference once - warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb - if any(warmup_types) and self.device.type != 'cpu': - im = 
[np.empty(*imgsz).astype(np.uint8)] # input - for _ in range(2 if self.jit else 1): # - self.forward(im) # warmup \ No newline at end of file diff --git a/spaces/xfys/yolov5_tracking/val_utils/scripts/run_burst.py b/spaces/xfys/yolov5_tracking/val_utils/scripts/run_burst.py deleted file mode 100644 index 595526436eb12c382123cf5ce4972e4aeb44a71f..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/scripts/run_burst.py +++ /dev/null @@ -1,173 +0,0 @@ -""" run_burst.py - -The example commands given below expect the following folder structure: - -- data - - gt - - burst - - {val,test} - - all_classes - - all_classes.json (filename is irrelevant) - - trackers - - burst - - exemplar_guided - - {val,test} - - my_tracking_method - - data - - results.json (filename is irrelevant) - - class_guided - - {val,test} - - my_other_tracking_method - - data - - results.json (filename is irrelevant) - -Run example: - -1) Exemplar-guided tasks (all three tasks share the same eval logic): -run_burst.py --USE_PARALLEL True --EXEMPLAR_GUIDED True --GT_FOLDER ../data/gt/burst/{val,test}/all_classes --TRACKERS_FOLDER ../data/trackers/burst/exemplar_guided/{val,test} - -2) Class-guided tasks (common class and long-tail): -run_burst.py --USE_PARALLEL FTrue --EXEMPLAR_GUIDED False --GT_FOLDER ../data/gt/burst/{val,test}/all_classes --TRACKERS_FOLDER ../data/trackers/burst/class_guided/{val,test} - -3) Refer to run_burst_ow.py for open world evaluation - -Command Line Arguments: Defaults, # Comments - Eval arguments: - 'USE_PARALLEL': False, - 'NUM_PARALLEL_CORES': 8, - 'BREAK_ON_ERROR': True, - 'PRINT_RESULTS': True, - 'PRINT_ONLY_COMBINED': False, - 'PRINT_CONFIG': True, - 'TIME_PROGRESS': True, - 'OUTPUT_SUMMARY': True, - 'OUTPUT_DETAILED': True, - 'PLOT_CURVES': True, - Dataset arguments: - 'GT_FOLDER': os.path.join(code_path, 'data/gt/burst/val'), # Location of GT data - 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/burst/class-guided/'), # Trackers location - 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER) - 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder) - 'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes) - 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val' - 'PRINT_CONFIG': True, # Whether to print current config - 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER - 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER - 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL - 'MAX_DETECTIONS': 300, # Number of maximal allowed detections per image (0 for unlimited) - Metric arguments: - 'METRICS': ['HOTA', 'CLEAR', 'Identity', 'TrackMAP'] -""" - -import sys -import os -import argparse -from tabulate import tabulate -from multiprocessing import freeze_support - -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) -import trackeval # noqa: E402 - - -def main(): - freeze_support() - - # Command line interface: - default_eval_config = trackeval.Evaluator.get_default_eval_config() - default_eval_config['PRINT_ONLY_COMBINED'] = True - default_eval_config['DISPLAY_LESS_PROGRESS'] = True - default_eval_config['PLOT_CURVES'] = False - default_eval_config["OUTPUT_DETAILED"] = False - default_eval_config["PRINT_RESULTS"] = False - default_eval_config["OUTPUT_SUMMARY"] = False - - default_dataset_config = 
trackeval.datasets.BURST.get_default_dataset_config() - - # default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity', 'TrackMAP']} - # default_metrics_config = {'METRICS': ['HOTA']} - default_metrics_config = {'METRICS': ['HOTA', 'TrackMAP']} - config = {**default_eval_config, **default_dataset_config, **default_metrics_config} # Merge default configs - parser = argparse.ArgumentParser() - for setting in config.keys(): - if type(config[setting]) == list or type(config[setting]) == type(None): - parser.add_argument("--" + setting, nargs='+') - else: - parser.add_argument("--" + setting) - args = parser.parse_args().__dict__ - for setting in args.keys(): - if args[setting] is not None: - if type(config[setting]) == type(True): - if args[setting] == 'True': - x = True - elif args[setting] == 'False': - x = False - else: - raise Exception('Command line parameter ' + setting + 'must be True or False') - elif type(config[setting]) == type(1): - x = int(args[setting]) - elif type(args[setting]) == type(None): - x = None - else: - x = args[setting] - config[setting] = x - eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()} - dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()} - metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()} - - # Run code - evaluator = trackeval.Evaluator(eval_config) - dataset_list = [trackeval.datasets.BURST(dataset_config)] - metrics_list = [] - for metric in [trackeval.metrics.TrackMAP, trackeval.metrics.CLEAR, trackeval.metrics.Identity, - trackeval.metrics.HOTA]: - if metric.get_name() in metrics_config['METRICS']: - metrics_list.append(metric()) - if len(metrics_list) == 0: - raise Exception('No metrics selected for evaluation') - output_res, output_msg = evaluator.evaluate(dataset_list, metrics_list, show_progressbar=True) - - class_name_to_id = {x['name']: x['id'] for x in dataset_list[0].gt_data['categories']} - known_list = [4, 13, 1038, 544, 1057, 34, 35, 36, 41, 45, 58, 60, 579, 1091, 1097, 1099, 78, 79, 81, 91, 1115, - 1117, 95, 1122, 99, 1132, 621, 1135, 625, 118, 1144, 126, 642, 1155, 133, 1162, 139, 154, 174, 185, - 699, 1215, 714, 717, 1229, 211, 729, 221, 229, 747, 235, 237, 779, 276, 805, 299, 829, 852, 347, - 371, 382, 896, 392, 926, 937, 428, 429, 961, 452, 979, 980, 982, 475, 480, 993, 1001, 502, 1018] - - row_labels = ("HOTA", "DetA", "AssA", "AP") - trackers = list(output_res['BURST'].keys()) - print("\n") - - def average_metric(m): - return round(100*sum(m) / len(m), 2) - - for tracker in trackers: - res = output_res['BURST'][tracker]['COMBINED_SEQ'] - all_names = [x for x in res.keys() if (x != 'cls_comb_cls_av') and (x != 'cls_comb_det_av')] - - class_split_names = { - "All": [x for x in res.keys() if (x != 'cls_comb_cls_av') and (x != 'cls_comb_det_av')], - "Common": [x for x in all_names if class_name_to_id[x] in known_list], - "Uncommon": [x for x in all_names if class_name_to_id[x] not in known_list] - } - - # table columns: 'all', 'common', 'uncommon' - # table rows: HOTA, AssA, DetA, mAP - table_data = [] - - for row_label in row_labels: - row = [row_label] - for split_name in ["All", "Common", "Uncommon"]: - split_classes = class_split_names[split_name] - - if row_label == "AP": - row.append(average_metric([res[c]['TrackMAP']["AP_all"].mean() for c in split_classes])) - else: - row.append(average_metric([res[c]['HOTA'][row_label].mean() for c in split_classes])) - - table_data.append(row) - - print(f"Results for Tracker: 
{tracker}\n") - print(tabulate(table_data, ["Metric", "All", "Common", "Uncommon"])) - - -if __name__ == '__main__': - main() diff --git a/spaces/xfys/yolov5_tracking/val_utils/trackeval/metrics/__init__.py b/spaces/xfys/yolov5_tracking/val_utils/trackeval/metrics/__init__.py deleted file mode 100644 index 1f84774682aa5ca52e98ae4424226bc3d802d3e3..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/trackeval/metrics/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .hota import HOTA -from .clear import CLEAR -from .identity import Identity -from .count import Count -from .j_and_f import JAndF -from .track_map import TrackMAP -from .vace import VACE -from .ideucl import IDEucl \ No newline at end of file diff --git a/spaces/xiangdy/chatGPT/modules/models/inspurai.py b/spaces/xiangdy/chatGPT/modules/models/inspurai.py deleted file mode 100644 index c590859fa7717d032290ccc490d22f4494541576..0000000000000000000000000000000000000000 --- a/spaces/xiangdy/chatGPT/modules/models/inspurai.py +++ /dev/null @@ -1,345 +0,0 @@ -# 代码主要来源于 https://github.com/Shawn-Inspur/Yuan-1.0/blob/main/yuan_api/inspurai.py - -import hashlib -import json -import os -import time -import uuid -from datetime import datetime - -import pytz -import requests - -from modules.presets import NO_APIKEY_MSG -from modules.models.base_model import BaseLLMModel - - -class Example: - """ store some examples(input, output pairs and formats) for few-shots to prime the model.""" - - def __init__(self, inp, out): - self.input = inp - self.output = out - self.id = uuid.uuid4().hex - - def get_input(self): - """return the input of the example.""" - return self.input - - def get_output(self): - """Return the output of the example.""" - return self.output - - def get_id(self): - """Returns the unique ID of the example.""" - return self.id - - def as_dict(self): - return { - "input": self.get_input(), - "output": self.get_output(), - "id": self.get_id(), - } - - -class Yuan: - """The main class for a user to interface with the Inspur Yuan API. - A user can set account info and add examples of the API request. - """ - - def __init__(self, - engine='base_10B', - temperature=0.9, - max_tokens=100, - input_prefix='', - input_suffix='\n', - output_prefix='答:', - output_suffix='\n\n', - append_output_prefix_to_query=False, - topK=1, - topP=0.9, - frequencyPenalty=1.2, - responsePenalty=1.2, - noRepeatNgramSize=2): - - self.examples = {} - self.engine = engine - self.temperature = temperature - self.max_tokens = max_tokens - self.topK = topK - self.topP = topP - self.frequencyPenalty = frequencyPenalty - self.responsePenalty = responsePenalty - self.noRepeatNgramSize = noRepeatNgramSize - self.input_prefix = input_prefix - self.input_suffix = input_suffix - self.output_prefix = output_prefix - self.output_suffix = output_suffix - self.append_output_prefix_to_query = append_output_prefix_to_query - self.stop = (output_suffix + input_prefix).strip() - self.api = None - - # if self.engine not in ['base_10B','translate','dialog']: - # raise Exception('engine must be one of [\'base_10B\',\'translate\',\'dialog\'] ') - def set_account(self, api_key): - account = api_key.split('||') - self.api = YuanAPI(user=account[0], phone=account[1]) - - def add_example(self, ex): - """Add an example to the object. - Example must be an instance of the Example class.""" - assert isinstance(ex, Example), "Please create an Example object." 
- self.examples[ex.get_id()] = ex - - def delete_example(self, id): - """Delete example with the specific id.""" - if id in self.examples: - del self.examples[id] - - def get_example(self, id): - """Get a single example.""" - return self.examples.get(id, None) - - def get_all_examples(self): - """Returns all examples as a list of dicts.""" - return {k: v.as_dict() for k, v in self.examples.items()} - - def get_prime_text(self): - """Formats all examples to prime the model.""" - return "".join( - [self.format_example(ex) for ex in self.examples.values()]) - - def get_engine(self): - """Returns the engine specified for the API.""" - return self.engine - - def get_temperature(self): - """Returns the temperature specified for the API.""" - return self.temperature - - def get_max_tokens(self): - """Returns the max tokens specified for the API.""" - return self.max_tokens - - def craft_query(self, prompt): - """Creates the query for the API request.""" - q = self.get_prime_text( - ) + self.input_prefix + prompt + self.input_suffix - if self.append_output_prefix_to_query: - q = q + self.output_prefix - - return q - - def format_example(self, ex): - """Formats the input, output pair.""" - return self.input_prefix + ex.get_input( - ) + self.input_suffix + self.output_prefix + ex.get_output( - ) + self.output_suffix - - def response(self, - query, - engine='base_10B', - max_tokens=20, - temperature=0.9, - topP=0.1, - topK=1, - frequencyPenalty=1.0, - responsePenalty=1.0, - noRepeatNgramSize=0): - """Obtains the original result returned by the API.""" - - if self.api is None: - return NO_APIKEY_MSG - try: - # requestId = submit_request(query,temperature,topP,topK,max_tokens, engine) - requestId = self.api.submit_request(query, temperature, topP, topK, max_tokens, engine, frequencyPenalty, - responsePenalty, noRepeatNgramSize) - response_text = self.api.reply_request(requestId) - except Exception as e: - raise e - - return response_text - - def del_special_chars(self, msg): - special_chars = ['', '', '#', '▃', '▁', '▂', ' '] - for char in special_chars: - msg = msg.replace(char, '') - return msg - - def submit_API(self, prompt, trun=[]): - """Submit prompt to yuan API interface and obtain an pure text reply. - :prompt: Question or any content a user may input. - :return: pure text response.""" - query = self.craft_query(prompt) - res = self.response(query, engine=self.engine, - max_tokens=self.max_tokens, - temperature=self.temperature, - topP=self.topP, - topK=self.topK, - frequencyPenalty=self.frequencyPenalty, - responsePenalty=self.responsePenalty, - noRepeatNgramSize=self.noRepeatNgramSize) - if 'resData' in res and res['resData'] != None: - txt = res['resData'] - else: - txt = '模型返回为空,请尝试修改输入' - # 单独针对翻译模型的后处理 - if self.engine == 'translate': - txt = txt.replace(' ##', '').replace(' "', '"').replace(": ", ":").replace(" ,", ",") \ - .replace('英文:', '').replace('文:', '').replace("( ", "(").replace(" )", ")") - else: - txt = txt.replace(' ', '') - txt = self.del_special_chars(txt) - - # trun多结束符截断模型输出 - if isinstance(trun, str): - trun = [trun] - try: - if trun != None and isinstance(trun, list) and trun != []: - for tr in trun: - if tr in txt and tr != "": - txt = txt[:txt.index(tr)] - else: - continue - except: - return txt - return txt - - -class YuanAPI: - ACCOUNT = '' - PHONE = '' - - SUBMIT_URL = "http://api.airyuan.cn:32102/v1/interface/api/infer/getRequestId?" - REPLY_URL = "http://api.airyuan.cn:32102/v1/interface/api/result?" 
- - def __init__(self, user, phone): - self.ACCOUNT = user - self.PHONE = phone - - @staticmethod - def code_md5(str): - code = str.encode("utf-8") - m = hashlib.md5() - m.update(code) - result = m.hexdigest() - return result - - @staticmethod - def rest_get(url, header, timeout, show_error=False): - '''Call rest get method''' - try: - response = requests.get(url, headers=header, timeout=timeout, verify=False) - return response - except Exception as exception: - if show_error: - print(exception) - return None - - def header_generation(self): - """Generate header for API request.""" - t = datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d") - token = self.code_md5(self.ACCOUNT + self.PHONE + t) - headers = {'token': token} - return headers - - def submit_request(self, query, temperature, topP, topK, max_tokens, engine, frequencyPenalty, responsePenalty, - noRepeatNgramSize): - """Submit query to the backend server and get requestID.""" - headers = self.header_generation() - # url=SUBMIT_URL + "account={0}&data={1}&temperature={2}&topP={3}&topK={4}&tokensToGenerate={5}&type={6}".format(ACCOUNT,query,temperature,topP,topK,max_tokens,"api") - # url=SUBMIT_URL + "engine={0}&account={1}&data={2}&temperature={3}&topP={4}&topK={5}&tokensToGenerate={6}" \ - # "&type={7}".format(engine,ACCOUNT,query,temperature,topP,topK, max_tokens,"api") - url = self.SUBMIT_URL + "engine={0}&account={1}&data={2}&temperature={3}&topP={4}&topK={5}&tokensToGenerate={6}" \ - "&type={7}&frequencyPenalty={8}&responsePenalty={9}&noRepeatNgramSize={10}". \ - format(engine, self.ACCOUNT, query, temperature, topP, topK, max_tokens, "api", frequencyPenalty, - responsePenalty, noRepeatNgramSize) - response = self.rest_get(url, headers, 30) - response_text = json.loads(response.text) - if response_text["flag"]: - requestId = response_text["resData"] - return requestId - else: - raise RuntimeWarning(response_text) - - def reply_request(self, requestId, cycle_count=5): - """Check reply API to get the inference response.""" - url = self.REPLY_URL + "account={0}&requestId={1}".format(self.ACCOUNT, requestId) - headers = self.header_generation() - response_text = {"flag": True, "resData": None} - for i in range(cycle_count): - response = self.rest_get(url, headers, 30, show_error=True) - response_text = json.loads(response.text) - if response_text["resData"] is not None: - return response_text - if response_text["flag"] is False and i == cycle_count - 1: - raise RuntimeWarning(response_text) - time.sleep(3) - return response_text - - -class Yuan_Client(BaseLLMModel): - - def __init__(self, model_name, api_key, user_name="", system_prompt=None): - super().__init__(model_name=model_name, user=user_name) - self.history = [] - self.api_key = api_key - self.system_prompt = system_prompt - - self.input_prefix = "" - self.output_prefix = "" - - def set_text_prefix(self, option, value): - if option == 'input_prefix': - self.input_prefix = value - elif option == 'output_prefix': - self.output_prefix = value - - def get_answer_at_once(self): - # yuan temperature is (0,1] and base model temperature is [0,2], and yuan 0.9 == base 1 so need to convert - temperature = self.temperature if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - topP = self.top_p - topK = self.n_choices - # max_tokens should be in [1,200] - max_tokens = self.max_generation_token if self.max_generation_token is not None else 50 - if max_tokens > 200: - max_tokens = 200 - stop = self.stop_sequence if self.stop_sequence is not None else [] - 
examples = [] - system_prompt = self.system_prompt - if system_prompt is not None: - lines = system_prompt.splitlines() - # TODO: support prefixes in system prompt or settings - """ - if lines[0].startswith('-'): - prefixes = lines.pop()[1:].split('|') - self.input_prefix = prefixes[0] - if len(prefixes) > 1: - self.output_prefix = prefixes[1] - if len(prefixes) > 2: - stop = prefixes[2].split(',') - """ - for i in range(0, len(lines), 2): - in_line = lines[i] - out_line = lines[i + 1] if i + 1 < len(lines) else "" - examples.append((in_line, out_line)) - yuan = Yuan(engine=self.model_name.replace('yuanai-1.0-', ''), - temperature=temperature, - max_tokens=max_tokens, - topK=topK, - topP=topP, - input_prefix=self.input_prefix, - input_suffix="", - output_prefix=self.output_prefix, - output_suffix="".join(stop), - ) - if not self.api_key: - return NO_APIKEY_MSG, 0 - yuan.set_account(self.api_key) - - for in_line, out_line in examples: - yuan.add_example(Example(inp=in_line, out=out_line)) - - prompt = self.history[-1]["content"] - answer = yuan.submit_API(prompt, trun=stop) - return answer, len(answer) diff --git a/spaces/xiaoxuezi/spleeter/spleeter/audio/spectrogram.py b/spaces/xiaoxuezi/spleeter/spleeter/audio/spectrogram.py deleted file mode 100644 index 280688891b9e7ee4ed822cbfaef9cd2c0d69b040..0000000000000000000000000000000000000000 --- a/spaces/xiaoxuezi/spleeter/spleeter/audio/spectrogram.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" Spectrogram specific data augmentation. """ - -# pyright: reportMissingImports=false -# pylint: disable=import-error -import numpy as np -import tensorflow as tf -from tensorflow.signal import hann_window, stft - -# pylint: enable=import-error - -__email__ = "spleeter@deezer.com" -__author__ = "Deezer Research" -__license__ = "MIT License" - - -def compute_spectrogram_tf( - waveform: tf.Tensor, - frame_length: int = 2048, - frame_step: int = 512, - spec_exponent: float = 1.0, - window_exponent: float = 1.0, -) -> tf.Tensor: - """ - Compute magnitude / power spectrogram from waveform as a - `n_samples x n_channels` tensor. - - Parameters: - waveform (tensorflow.Tensor): - Input waveform as `(times x number of channels)` tensor. - frame_length (int): - Length of a STFT frame to use. - frame_step (int): - HOP between successive frames. - spec_exponent (float): - Exponent of the spectrogram (usually 1 for magnitude - spectrogram, or 2 for power spectrogram). - window_exponent (float): - Exponent applied to the Hann windowing function (may be - useful for making perfect STFT/iSTFT reconstruction). - - Returns: - tensorflow.Tensor: - Computed magnitude / power spectrogram as a - `(T x F x n_channels)` tensor. - """ - stft_tensor: tf.Tensor = tf.transpose( - stft( - tf.transpose(waveform), - frame_length, - frame_step, - window_fn=lambda f, dtype: hann_window( - f, periodic=True, dtype=waveform.dtype - ) - ** window_exponent, - ), - perm=[1, 2, 0], - ) - return tf.abs(stft_tensor) ** spec_exponent - - -def time_stretch( - spectrogram: tf.Tensor, - factor: float = 1.0, - method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR, -) -> tf.Tensor: - """ - Time stretch a spectrogram preserving shape in tensorflow. Note that - this is an approximation in the frequency domain. - - Parameters: - spectrogram (tensorflow.Tensor): - Input spectrogram to be time stretched as tensor. - factor (float): - (Optional) Time stretch factor, must be > 0, default to `1`. 
- method (tensorflow.image.ResizeMethod): - (Optional) Interpolation method, default to `BILINEAR`. - - Returns: - tensorflow.Tensor: - Time stretched spectrogram as tensor with same shape. - """ - T = tf.shape(spectrogram)[0] - T_ts = tf.cast(tf.cast(T, tf.float32) * factor, tf.int32)[0] - F = tf.shape(spectrogram)[1] - ts_spec = tf.image.resize_images( - spectrogram, [T_ts, F], method=method, align_corners=True - ) - return tf.image.resize_image_with_crop_or_pad(ts_spec, T, F) - - -def random_time_stretch( - spectrogram: tf.Tensor, factor_min: float = 0.9, factor_max: float = 1.1, **kwargs -) -> tf.Tensor: - """ - Time stretch a spectrogram preserving shape with random ratio in - tensorflow. Applies time_stretch to spectrogram with a random ratio - drawn uniformly in `[factor_min, factor_max]`. - - Parameters: - spectrogram (tensorflow.Tensor): - Input spectrogram to be time stretched as tensor. - factor_min (float): - (Optional) Min time stretch factor, default to `0.9`. - factor_max (float): - (Optional) Max time stretch factor, default to `1.1`. - - Returns: - tensorflow.Tensor: - Randomly time stretched spectrogram as tensor with same shape. - """ - factor = ( - tf.random_uniform(shape=(1,), seed=0) * (factor_max - factor_min) + factor_min - ) - return time_stretch(spectrogram, factor=factor, **kwargs) - - -def pitch_shift( - spectrogram: tf.Tensor, - semitone_shift: float = 0.0, - method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR, -) -> tf.Tensor: - """ - Pitch shift a spectrogram preserving shape in tensorflow. Note that - this is an approximation in the frequency domain. - - Parameters: - spectrogram (tensorflow.Tensor): - Input spectrogram to be pitch shifted as tensor. - semitone_shift (float): - (Optional) Pitch shift in semitone, default to `0.0`. - method (tensorflow.image.ResizeMethod): - (Optional) Interpolation method, default to `BILINEAR`. - - Returns: - tensorflow.Tensor: - Pitch shifted spectrogram (same shape as spectrogram). - """ - factor = 2 ** (semitone_shift / 12.0) - T = tf.shape(spectrogram)[0] - F = tf.shape(spectrogram)[1] - F_ps = tf.cast(tf.cast(F, tf.float32) * factor, tf.int32)[0] - ps_spec = tf.image.resize_images( - spectrogram, [T, F_ps], method=method, align_corners=True - ) - paddings = [[0, 0], [0, tf.maximum(0, F - F_ps)], [0, 0]] - return tf.pad(ps_spec[:, :F, :], paddings, "CONSTANT") - - -def random_pitch_shift( - spectrogram: tf.Tensor, shift_min: float = -1.0, shift_max: float = 1.0, **kwargs -) -> tf.Tensor: - """ - Pitch shift a spectrogram preserving shape with random ratio in - tensorflow. Applies pitch_shift to spectrogram with a random shift - amount (expressed in semitones) drawn uniformly in - `[shift_min, shift_max]`. - - Parameters: - spectrogram (tensorflow.Tensor): - Input spectrogram to be pitch shifted as tensor. - shift_min (float): - (Optional) Min pitch shift in semitone, default to -1. - shift_max (float): - (Optional) Max pitch shift in semitone, default to 1. - - Returns: - tensorflow.Tensor: - Randomly pitch shifted spectrogram (same shape as spectrogram). 
- """ - semitone_shift = ( - tf.random_uniform(shape=(1,), seed=0) * (shift_max - shift_min) + shift_min - ) - return pitch_shift(spectrogram, semitone_shift=semitone_shift, **kwargs) diff --git a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h b/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h deleted file mode 100644 index ad1311a78f61303616504eb991aaa9c4a93d9948..0000000000000000000000000000000000000000 --- a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h +++ /dev/null @@ -1,33 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once -#include - -namespace groundingdino { - -at::Tensor ms_deform_attn_cuda_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector ms_deform_attn_cuda_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/xu1998hz/sescore_english_coco/README.md b/spaces/xu1998hz/sescore_english_coco/README.md deleted file mode 100644 index b2e0b3d72009245c80785661ff53a15f37ce235e..0000000000000000000000000000000000000000 --- a/spaces/xu1998hz/sescore_english_coco/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: SEScore -datasets: -- null -tags: -- evaluate -- metric -description: 'SEScore: a text generation evaluation metric' -sdk: gradio -sdk_version: 3.0.2 -app_file: app.py -pinned: false -duplicated_from: xu1998hz/sescore ---- - -# Metric Card for SEScore -![alt text](https://huggingface.co/spaces/xu1998hz/sescore/blob/main/img/logo_sescore.png) - -## Metric Description -*SEScore is an unsupervised learned evaluation metric trained on synthesized dataset* - -## How to Use - -*Provide simplest possible example for using the metric* - -### Inputs -*SEScore takes input of predictions (a list of candidate translations) and references (a list of reference translations).* - -### Output Values - -*Output value is between 0 to -25* - -#### Values from Popular Papers - - -### Examples -*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. 
If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.* - -## Limitations and Bias -*Note any known limitations or biases that the metric has, with links and references if possible.* - -## Citation -*Cite the source where this metric was introduced.* - -## Further References -*Add any useful further references.* diff --git a/spaces/xun/Qwen-Token-Calc/qwen/configuration_qwen.py b/spaces/xun/Qwen-Token-Calc/qwen/configuration_qwen.py deleted file mode 100644 index f8fe2cb434cefda404c506d541959e2fefc86884..0000000000000000000000000000000000000000 --- a/spaces/xun/Qwen-Token-Calc/qwen/configuration_qwen.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) Alibaba Cloud. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from transformers import PretrainedConfig - - -class QWenConfig(PretrainedConfig): - model_type = "qwen" - keys_to_ignore_at_inference = ["past_key_values"] - - def __init__( - self, - vocab_size=151936, - hidden_size=4096, - num_hidden_layers=32, - num_attention_heads=32, - emb_dropout_prob=0.0, - attn_dropout_prob=0.0, - layer_norm_epsilon=1e-6, - initializer_range=0.02, - max_position_embeddings=8192, - scale_attn_weights=True, - use_cache=True, - bf16=False, - fp16=False, - fp32=False, - kv_channels=128, - rotary_pct=1.0, - rotary_emb_base=10000, - use_dynamic_ntk=True, - use_logn_attn=True, - use_flash_attn="auto", - intermediate_size=22016, - no_bias=True, - tie_word_embeddings=False, - use_cache_quantization=False, - use_cache_kernel=False, - softmax_in_fp32=False, - **kwargs, - ): - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.intermediate_size = intermediate_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.emb_dropout_prob = emb_dropout_prob - self.attn_dropout_prob = attn_dropout_prob - self.layer_norm_epsilon = layer_norm_epsilon - self.initializer_range = initializer_range - self.scale_attn_weights = scale_attn_weights - self.use_cache = use_cache - self.max_position_embeddings = max_position_embeddings - self.bf16 = bf16 - self.fp16 = fp16 - self.fp32 = fp32 - self.kv_channels = kv_channels - self.rotary_pct = rotary_pct - self.rotary_emb_base = rotary_emb_base - self.use_dynamic_ntk = use_dynamic_ntk - self.use_logn_attn = use_logn_attn - self.use_flash_attn = use_flash_attn - self.no_bias = no_bias - self.use_cache_quantization = use_cache_quantization - self.use_cache_kernel = use_cache_kernel - self.softmax_in_fp32 = softmax_in_fp32 - super().__init__( - tie_word_embeddings=tie_word_embeddings, - **kwargs - ) diff --git a/spaces/yangogo/bingo/src/pages/api/image.ts b/spaces/yangogo/bingo/src/pages/api/image.ts deleted file mode 100644 index 4b894bea86050c0f3888cc56f60c0cb7f8b57cfc..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/src/pages/api/image.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' -import { createImage } from '@/lib/bots/bing/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const { prompt, id } = req.query - if (!prompt) { - return res.json({ - result: { - value: 'Image', - message: 'No Prompt' - } - }) - } - try { - const headers = createHeaders(req.cookies, { - IMAGE_BING_COOKIE: 
process.env.IMAGE_BING_COOKIE - }) - - debug('headers', headers) - const response = await createImage(String(prompt), String(id), { - ...headers, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - }) - res.writeHead(200, { - 'Content-Type': 'text/plain; charset=UTF-8', - }) - return res.end(response) - } catch (e) { - return res.json({ - result: { - value: 'Error', - message: `${e}` - } - }) - } -} diff --git a/spaces/yanli01/gpt01/custom.css b/spaces/yanli01/gpt01/custom.css deleted file mode 100644 index 5143eb138ea2469d8c457c71cb210fd3fb7cbe15..0000000000000000000000000000000000000000 --- a/spaces/yanli01/gpt01/custom.css +++ /dev/null @@ -1,162 +0,0 @@ -:root { - --chatbot-color-light: #F3F3F3; - --chatbot-color-dark: #121111; -} - -/* status_display */ -#status_display { - display: flex; - min-height: 2.5em; - align-items: flex-end; - justify-content: flex-end; -} -#status_display p { - font-size: .85em; - font-family: monospace; - color: var(--body-text-color-subdued); -} - -#chuanhu_chatbot, #status_display { - transition: all 0.6s; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色 */ -#chuanhu_chatbot { - background-color: var(--chatbot-color-light) !important; -} -[data-testid = "bot"] { - background-color: #FFFFFF !important; -} -[data-testid = "user"] { - background-color: #95EC69 !important; -} -/* 对话气泡 */ -[class *= "message"] { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; -} -/* 表格 */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* 行内代码 */ -code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: hsla(0, 0%, 0%, 80%)!important; - border-radius: 10px; - padding: 1.4em 1.2em 0em 1.4em; - margin: 1.2em 2em 1.2em 0.5em; - color: #FFF; - box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); -} -/* 代码高亮样式 */ -.highlight .hll { background-color: #49483e } -.highlight .c { color: #75715e } /* Comment */ -.highlight .err { color: #960050; background-color: #1e0010 } /* Error */ -.highlight .k { color: #66d9ef } /* Keyword */ -.highlight .l { color: #ae81ff } /* Literal */ -.highlight .n { color: #f8f8f2 } /* Name */ -.highlight .o { color: #f92672 } /* Operator */ -.highlight .p { color: #f8f8f2 } /* Punctuation */ -.highlight .ch { color: #75715e } /* Comment.Hashbang */ -.highlight .cm { color: #75715e } /* Comment.Multiline */ -.highlight .cp { color: #75715e } /* Comment.Preproc */ -.highlight .cpf { color: #75715e } /* Comment.PreprocFile */ -.highlight .c1 { color: #75715e } /* Comment.Single */ -.highlight .cs { color: #75715e } /* Comment.Special */ -.highlight .gd { 
color: #f92672 } /* Generic.Deleted */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .gi { color: #a6e22e } /* Generic.Inserted */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #75715e } /* Generic.Subheading */ -.highlight .kc { color: #66d9ef } /* Keyword.Constant */ -.highlight .kd { color: #66d9ef } /* Keyword.Declaration */ -.highlight .kn { color: #f92672 } /* Keyword.Namespace */ -.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ -.highlight .kr { color: #66d9ef } /* Keyword.Reserved */ -.highlight .kt { color: #66d9ef } /* Keyword.Type */ -.highlight .ld { color: #e6db74 } /* Literal.Date */ -.highlight .m { color: #ae81ff } /* Literal.Number */ -.highlight .s { color: #e6db74 } /* Literal.String */ -.highlight .na { color: #a6e22e } /* Name.Attribute */ -.highlight .nb { color: #f8f8f2 } /* Name.Builtin */ -.highlight .nc { color: #a6e22e } /* Name.Class */ -.highlight .no { color: #66d9ef } /* Name.Constant */ -.highlight .nd { color: #a6e22e } /* Name.Decorator */ -.highlight .ni { color: #f8f8f2 } /* Name.Entity */ -.highlight .ne { color: #a6e22e } /* Name.Exception */ -.highlight .nf { color: #a6e22e } /* Name.Function */ -.highlight .nl { color: #f8f8f2 } /* Name.Label */ -.highlight .nn { color: #f8f8f2 } /* Name.Namespace */ -.highlight .nx { color: #a6e22e } /* Name.Other */ -.highlight .py { color: #f8f8f2 } /* Name.Property */ -.highlight .nt { color: #f92672 } /* Name.Tag */ -.highlight .nv { color: #f8f8f2 } /* Name.Variable */ -.highlight .ow { color: #f92672 } /* Operator.Word */ -.highlight .w { color: #f8f8f2 } /* Text.Whitespace */ -.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ -.highlight .mf { color: #ae81ff } /* Literal.Number.Float */ -.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ -.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ -.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ -.highlight .sa { color: #e6db74 } /* Literal.String.Affix */ -.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ -.highlight .sc { color: #e6db74 } /* Literal.String.Char */ -.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ -.highlight .sd { color: #e6db74 } /* Literal.String.Doc */ -.highlight .s2 { color: #e6db74 } /* Literal.String.Double */ -.highlight .se { color: #ae81ff } /* Literal.String.Escape */ -.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ -.highlight .si { color: #e6db74 } /* Literal.String.Interpol */ -.highlight .sx { color: #e6db74 } /* Literal.String.Other */ -.highlight .sr { color: #e6db74 } /* Literal.String.Regex */ -.highlight .s1 { color: #e6db74 } /* Literal.String.Single */ -.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ -.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #a6e22e } /* Name.Function.Magic */ -.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ -.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ -.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ -.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ -.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ diff --git a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/submission/__init__.py b/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/submission/__init__.py deleted file mode 100644 index 53856121d673459ae2b21ecef3d0fcb12a12cdfe..0000000000000000000000000000000000000000 --- 
a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/submission/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -from . import run_context -from . import submit diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/track/validate.ts b/spaces/yderre-aubay/midi-player-demo/src/common/track/validate.ts deleted file mode 100644 index 3cd712de48ceb0089cdc6e90b1bada74c5261a3c..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/common/track/validate.ts +++ /dev/null @@ -1,15 +0,0 @@ -import { isNumber } from "lodash" -import { TrackEvent } from "./TrackEvent" - -export const validateMidiEvent = (e: TrackEvent) => { - Object.values(e).forEach((v) => { - if (isNumber(v)) { - if (!Number.isInteger(v)) { - console.warn("non integer is not allowed in MIDI", e, v) - } - if (v < 0) { - console.warn("minus value is not allowed in MIDI", e, v) - } - } - }) -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/Constants.ts b/spaces/yderre-aubay/midi-player-demo/src/main/Constants.ts deleted file mode 100644 index 1d91ac840cdaf7bd803e455330ed8c7d52eba5a2..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/Constants.ts +++ /dev/null @@ -1,12 +0,0 @@ -export const TIME_BASE = 480 - -export const Layout = { - pixelsPerTick: 0.1, - keyHeight: 12, - keyWidth: 64, - rulerHeight: 32, -} - -export const MaxNoteNumber = 127 - -export const WHEEL_SCROLL_RATE = 1 / 120 diff --git a/spaces/ygangang/VToonify/vtoonify/train_vtoonify_t.py b/spaces/ygangang/VToonify/vtoonify/train_vtoonify_t.py deleted file mode 100644 index 147d5f38a5b25822ab05f089173cd96c6aa22c12..0000000000000000000000000000000000000000 --- a/spaces/ygangang/VToonify/vtoonify/train_vtoonify_t.py +++ /dev/null @@ -1,432 +0,0 @@ -import os -#os.environ['CUDA_VISIBLE_DEVICES'] = "0" -import argparse -import math -import random - -import numpy as np -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils import data -import torch.distributed as dist -from torchvision import transforms, utils -from tqdm import tqdm -from PIL import Image -from util import * -from model.stylegan import lpips -from model.stylegan.model import Generator, Downsample -from model.vtoonify import VToonify, ConditionalDiscriminator -from model.bisenet.model import BiSeNet -from model.simple_augment import random_apply_affine -from model.stylegan.distributed import ( - get_rank, - synchronize, - reduce_loss_dict, - reduce_sum, - get_world_size, -) - -# In the paper, --weight for each style is set as follows, -# cartoon: default -# caricature: default -# pixar: 1 1 1 1 1 1 1 1 1 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 -# comic: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1 -# arcane: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1 - -class TrainOptions(): - def __init__(self): - - self.parser = argparse.ArgumentParser(description="Train VToonify-T") - self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations") - self.parser.add_argument("--batch", type=int, default=8, help="batch sizes for each gpus") - self.parser.add_argument("--lr", type=float, default=0.0001, 
help="learning rate") - self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training") - self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration") - self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint") - self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint") - self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving an intermediate image result") - - self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss") - self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse recontruction loss") - self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss") - self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss") - - self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model") - self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents") - self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/stylegan2-ffhq-config-f.pt', help="path to the stylegan model") - self.parser.add_argument("--finetunegan_path", type=str, default='./checkpoint/cartoon/finetune-000600.pt', help="path to the finetuned stylegan model") - self.parser.add_argument("--weight", type=float, nargs=18, default=[1]*9+[0]*9, help="the weight for blending two models") - self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") - self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder") - - self.parser.add_argument("--name", type=str, default='vtoonify_t_cartoon', help="saved model name") - self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder") - - def parse(self): - self.opt = self.parser.parse_args() - if self.opt.encoder_path is None: - self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt') - args = vars(self.opt) - if self.opt.local_rank == 0: - print('Load options') - for name, value in sorted(args.items()): - print('%s: %s' % (str(name), str(value))) - return self.opt - - -# pretrain E of vtoonify. -# We train E so that its the last-layer feature matches the original 8-th-layer input feature of G1 -# See Model initialization in Sec. 4.1.2 for the detail -def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device): - pbar = range(args.iter) - - if get_rank() == 0: - pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01) - - recon_loss = torch.tensor(0.0, device=device) - loss_dict = {} - - if args.distributed: - g_module = generator.module - else: - g_module = generator - - accum = 0.5 ** (32 / (10 * 1000)) - - requires_grad(g_module.encoder, True) - - for idx in pbar: - i = idx + args.start_iter - - if i > args.iter: - print("Done!") - break - - with torch.no_grad(): - # during pretraining, no geometric transformations are applied. 
- noise_sample = torch.randn(args.batch, 512).cuda() - ws_ = basemodel.style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w - ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w''=w'=w+n - img_gen, _ = basemodel([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0) # image part of x' - img_gen = torch.clamp(img_gen, -1, 1).detach() - img_gen512 = down(img_gen.detach()) - img_gen256 = down(img_gen512.detach()) # image part of x'_down - mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0] - real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1).detach() # x'_down - # f_G1^(8)(w'') - real_feat, real_skip = g_ema.generator([ws_], input_is_latent=True, return_feature_ind = 6, truncation=0.5, truncation_latent=0) - real_feat = real_feat.detach() - real_skip = real_skip.detach() - - # f_E^(last)(x'_down) - fake_feat, fake_skip = generator(real_input, style=None, return_feat=True) - - # L_E in Eq.(1) - recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip) - - loss_dict["emse"] = recon_loss - - generator.zero_grad() - recon_loss.backward() - g_optim.step() - - accumulate(g_ema.encoder, g_module.encoder, accum) - - loss_reduced = reduce_loss_dict(loss_dict) - - emse_loss_val = loss_reduced["emse"].mean().item() - - if get_rank() == 0: - pbar.set_description( - ( - f"iter: {i:d}; emse: {emse_loss_val:.3f}" - ) - ) - - if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: - if (i+1) == args.iter: - savename = f"checkpoint/%s/pretrain.pt"%(args.name) - else: - savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1) - torch.save( - { - #"g": g_module.encoder.state_dict(), - "g_ema": g_ema.encoder.state_dict(), - }, - savename, - ) - - -# generate paired data and train vtoonify, see Sec. 4.1.2 for the detail -def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device): - pbar = range(args.iter) - - if get_rank() == 0: - pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=120, dynamic_ncols=False) - - d_loss = torch.tensor(0.0, device=device) - g_loss = torch.tensor(0.0, device=device) - grec_loss = torch.tensor(0.0, device=device) - gfeat_loss = torch.tensor(0.0, device=device) - temporal_loss = torch.tensor(0.0, device=device) - loss_dict = {} - - if args.distributed: - g_module = generator.module - d_module = discriminator.module - - else: - g_module = generator - d_module = discriminator - - accum = 0.5 ** (32 / (10 * 1000)) - - for idx in pbar: - i = idx + args.start_iter - - if i > args.iter: - print("Done!") - break - - ###### This part is for data generation. Generate pair (x, y, w'') as in Fig. 
5 of the paper - with torch.no_grad(): - noise_sample = torch.randn(args.batch, 512).cuda() - wc = basemodel.style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w - wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n - wc = wc.detach() - xc, _ = basemodel([wc], input_is_latent=True, truncation=0.5, truncation_latent=0) - xc = torch.clamp(xc, -1, 1).detach() # x' - xl = pspencoder(F.adaptive_avg_pool2d(xc, 256)) - xl = basemodel.style(xl.reshape(xl.shape[0]*xl.shape[1], xl.shape[2])).reshape(xl.shape) # E_s(x'_down) - xl = torch.cat((wc[:,0:7]*0.5, xl[:,7:18]), dim=1).detach() # w'' = concatenate w' and E_s(x'_down) - xs, _ = g_ema.generator([xl], input_is_latent=True) - xs = torch.clamp(xs, -1, 1).detach() # y' - # during training, random geometric transformations are applied. - imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None) - real_input1024 = imgs[:,0:3].detach() # image part of x - real_input512 = down(real_input1024).detach() - real_input256 = down(real_input512).detach() - mask512 = parsingpredictor(2*real_input512)[0] - mask256 = down(mask512).detach() - mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x - real_output = imgs[:,3:].detach() # y - real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down - # for log, sample a fixed input-output pair (x_down, y, w'') - if idx == 0 or i == 0: - samplein = real_input.clone().detach() - sampleout = real_output.clone().detach() - samplexl = xl.clone().detach() - - ###### This part is for training discriminator - - requires_grad(g_module.encoder, False) - requires_grad(g_module.fusion_out, False) - requires_grad(g_module.fusion_skip, False) - requires_grad(discriminator, True) - - fake_output = generator(real_input, xl) - fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256)) - real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256)) - - # L_adv in Eq.(3) - d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss - loss_dict["d"] = d_loss - - discriminator.zero_grad() - d_loss.backward() - d_optim.step() - - ###### This part is for training generator (encoder and fusion modules) - - requires_grad(g_module.encoder, True) - requires_grad(g_module.fusion_out, True) - requires_grad(g_module.fusion_skip, True) - requires_grad(discriminator, False) - - fake_output = generator(real_input, xl) - fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256)) - # L_adv in Eq.(3) - g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss - # L_rec in Eq.(2) - grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss - gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 will out of memory - F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 will get blurry output - - loss_dict["g"] = g_loss - loss_dict["gr"] = grec_loss - loss_dict["gf"] = gfeat_loss - - w = random.randint(0,1024-896) - h = random.randint(0,1024-896) - crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach() - crop_input = down(down(crop_input)) - crop_fake_output = fake_output[:,:,w:w+896,h:h+896] - fake_crop_output = generator(crop_input, xl) - # L_tmp in Eq.(4), gradually increase the weight of L_tmp - temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss - loss_dict["tp"] = temporal_loss - - generator.zero_grad() - (g_loss + grec_loss + gfeat_loss + temporal_loss).backward() - g_optim.step() 
- - accumulate(g_ema.encoder, g_module.encoder, accum) - accumulate(g_ema.fusion_out, g_module.fusion_out, accum) - accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum) - - loss_reduced = reduce_loss_dict(loss_dict) - - d_loss_val = loss_reduced["d"].mean().item() - g_loss_val = loss_reduced["g"].mean().item() - gr_loss_val = loss_reduced["gr"].mean().item() - gf_loss_val = loss_reduced["gf"].mean().item() - tmp_loss_val = loss_reduced["tp"].mean().item() - - if get_rank() == 0: - pbar.set_description( - ( - f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; " - f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}" - ) - ) - - if i % args.log_every == 0 or (i+1) == args.iter: - with torch.no_grad(): - g_ema.eval() - sample = g_ema(samplein, samplexl) - sample = F.interpolate(torch.cat((sampleout, sample), dim=0), 256) - utils.save_image( - sample, - f"log/%s/%05d.jpg"%(args.name, i), - nrow=int(args.batch), - normalize=True, - range=(-1, 1), - ) - - if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter: - if (i+1) == args.iter: - savename = f"checkpoint/%s/vtoonify.pt"%(args.name) - else: - savename = f"checkpoint/%s/vtoonify_%05d.pt"%(args.name, i+1) - torch.save( - { - #"g": g_module.state_dict(), - #"d": d_module.state_dict(), - "g_ema": g_ema.state_dict(), - }, - savename, - ) - - - -if __name__ == "__main__": - - device = "cuda" - parser = TrainOptions() - args = parser.parse() - if args.local_rank == 0: - print('*'*98) - if not os.path.exists("log/%s/"%(args.name)): - os.makedirs("log/%s/"%(args.name)) - if not os.path.exists("checkpoint/%s/"%(args.name)): - os.makedirs("checkpoint/%s/"%(args.name)) - - n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 - args.distributed = n_gpu > 1 - - if args.distributed: - torch.cuda.set_device(args.local_rank) - torch.distributed.init_process_group(backend="nccl", init_method="env://") - synchronize() - - generator = VToonify(backbone = 'toonify').to(device) - generator.apply(weights_init) - g_ema = VToonify(backbone = 'toonify').to(device) - g_ema.eval() - - basemodel = Generator(1024, 512, 8, 2).to(device) # G0 - finetunemodel = Generator(1024, 512, 8, 2).to(device) - basemodel.load_state_dict(torch.load(args.stylegan_path, map_location=lambda storage, loc: storage)['g_ema']) - finetunemodel.load_state_dict(torch.load(args.finetunegan_path, map_location=lambda storage, loc: storage)['g_ema']) - fused_state_dict = blend_models(finetunemodel, basemodel, args.weight) # G1 - generator.generator.load_state_dict(fused_state_dict) # load G1 - g_ema.generator.load_state_dict(fused_state_dict) - requires_grad(basemodel, False) - requires_grad(generator.generator, False) - requires_grad(g_ema.generator, False) - - if not args.pretrain: - generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"]) - # we initialize the fusion modules to map f_G \otimes f_E to f_G. 
- for k in generator.fusion_out: - k.weight.data *= 0.01 - k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda() - for k in generator.fusion_skip: - k.weight.data *= 0.01 - k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda() - - accumulate(g_ema.encoder, generator.encoder, 0) - accumulate(g_ema.fusion_out, generator.fusion_out, 0) - accumulate(g_ema.fusion_skip, generator.fusion_skip, 0) - - g_parameters = list(generator.encoder.parameters()) - if not args.pretrain: - g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters()) - - g_optim = optim.Adam( - g_parameters, - lr=args.lr, - betas=(0.9, 0.99), - ) - - if args.distributed: - generator = nn.parallel.DistributedDataParallel( - generator, - device_ids=[args.local_rank], - output_device=args.local_rank, - broadcast_buffers=False, - find_unused_parameters=True, - ) - - parsingpredictor = BiSeNet(n_classes=19) - parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) - parsingpredictor.to(device).eval() - requires_grad(parsingpredictor, False) - - # we apply gaussian blur to the images to avoid flickers caused during downsampling - down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device) - requires_grad(down, False) - - directions = torch.tensor(np.load(args.direction_path)).to(device) - - if not args.pretrain: - discriminator = ConditionalDiscriminator(256).to(device) - - d_optim = optim.Adam( - discriminator.parameters(), - lr=args.lr, - betas=(0.9, 0.99), - ) - - if args.distributed: - discriminator = nn.parallel.DistributedDataParallel( - discriminator, - device_ids=[args.local_rank], - output_device=args.local_rank, - broadcast_buffers=False, - find_unused_parameters=True, - ) - - percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank]) - requires_grad(percept.model.net, False) - - pspencoder = load_psp_standalone(args.style_encoder_path, device) - - if args.local_rank == 0: - print('Load models and data successfully loaded!') - - if args.pretrain: - pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device) - else: - train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/lilt/configuration_lilt.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/lilt/configuration_lilt.py deleted file mode 100644 index d11899c94312adfc4be612aad56f4e884b457fc5..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/lilt/configuration_lilt.py +++ /dev/null @@ -1,133 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" LiLT configuration""" - -from ...configuration_utils import PretrainedConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "SCUT-DLVCLab/lilt-roberta-en-base": ( - "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" - ), -} - - -class LiltConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`LiltModel`]. It is used to instantiate a LiLT - model according to the specified arguments, defining the model architecture. Instantiating a configuration with the - defaults will yield a similar configuration to that of the LiLT - [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) architecture. - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 30522): - Vocabulary size of the LiLT model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`LiltModel`]. - hidden_size (`int`, *optional*, defaults to 768): - Dimensionality of the encoder layers and the pooler layer. Should be a multiple of 24. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 12): - Number of attention heads for each attention layer in the Transformer encoder. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. - hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"silu"` and `"gelu_new"` are supported. - hidden_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention probabilities. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 2): - The vocabulary size of the `token_type_ids` passed when calling [`LiltModel`]. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - position_embedding_type (`str`, *optional*, defaults to `"absolute"`): - Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For - positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to - [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). - For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models - with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). - classifier_dropout (`float`, *optional*): - The dropout ratio for the classification head. 
- channel_shrink_ratio (`int`, *optional*, defaults to 4): - The shrink ratio compared to the `hidden_size` for the channel dimension of the layout embeddings. - max_2d_position_embeddings (`int`, *optional*, defaults to 1024): - The maximum value that the 2D position embedding might ever be used with. Typically set this to something - large just in case (e.g., 1024). - - Examples: - - ```python - >>> from transformers import LiltConfig, LiltModel - - >>> # Initializing a LiLT SCUT-DLVCLab/lilt-roberta-en-base style configuration - >>> configuration = LiltConfig() - >>> # Randomly initializing a model from the SCUT-DLVCLab/lilt-roberta-en-base style configuration - >>> model = LiltModel(configuration) - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "lilt" - - def __init__( - self, - vocab_size=30522, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - layer_norm_eps=1e-12, - pad_token_id=0, - position_embedding_type="absolute", - classifier_dropout=None, - channel_shrink_ratio=4, - max_2d_position_embeddings=1024, - **kwargs, - ): - super().__init__(pad_token_id=pad_token_id, **kwargs) - - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - self.position_embedding_type = position_embedding_type - self.classifier_dropout = classifier_dropout - self.channel_shrink_ratio = channel_shrink_ratio - self.max_2d_position_embeddings = max_2d_position_embeddings diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mgp_str/modeling_mgp_str.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mgp_str/modeling_mgp_str.py deleted file mode 100644 index 5d1f5bea7bfd357c7b09417f4f07ae08b54c8245..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mgp_str/modeling_mgp_str.py +++ /dev/null @@ -1,518 +0,0 @@ -# coding=utf-8 -# Copyright 2023 Alibaba Research and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" PyTorch MGP-STR model.""" - -import collections.abc -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch import nn - -from ...modeling_outputs import BaseModelOutput -from ...modeling_utils import PreTrainedModel -from ...utils import ( - ModelOutput, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_mgp_str import MgpstrConfig - - -logger = logging.get_logger(__name__) - -# General docstring -_CONFIG_FOR_DOC = "MgpstrConfig" -_TOKENIZER_FOR_DOC = "MgpstrTokenizer" - -# Base docstring -_CHECKPOINT_FOR_DOC = "alibaba-damo/mgp-str-base" - -MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "alibaba-damo/mgp-str-base", - # See all MGP-STR models at https://huggingface.co/models?filter=mgp-str -] - - -# Copied from transformers.models.beit.modeling_beit.drop_path -def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: - """ - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - - Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, - however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the - layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the - argument. - """ - if drop_prob == 0.0 or not training: - return input - keep_prob = 1 - drop_prob - shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) - random_tensor.floor_() # binarize - output = input.div(keep_prob) * random_tensor - return output - - -# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Mgpstr -class MgpstrDropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" - - def __init__(self, drop_prob: Optional[float] = None) -> None: - super().__init__() - self.drop_prob = drop_prob - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - return drop_path(hidden_states, self.drop_prob, self.training) - - def extra_repr(self) -> str: - return "p={}".format(self.drop_prob) - - -@dataclass -class MgpstrModelOutput(ModelOutput): - """ - Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. - - Args: - logits (`tuple(torch.FloatTensor)` of shape `(batch_size, config.num_character_labels)`): - Tuple of `torch.FloatTensor` (one for the output of character of shape `(batch_size, - config.max_token_length, config.num_character_labels)`, + one for the output of bpe of shape `(batch_size, - config.max_token_length, config.num_bpe_labels)`, + one for the output of wordpiece of shape `(batch_size, - config.max_token_length, config.num_wordpiece_labels)`) . - - Classification scores (before SoftMax) of character, bpe and wordpiece. 
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, config.max_token_length, - sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - a3_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_a3_attentions=True` is passed or when `config.output_a3_attentions=True`): - Tuple of `torch.FloatTensor` (one for the attention of character, + one for the attention of bpe`, + one - for the attention of wordpiece) of shape `(batch_size, config.max_token_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - logits: Tuple[torch.FloatTensor] = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - a3_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -class MgpstrEmbeddings(nn.Module): - """2D Image to Patch Embedding""" - - def __init__(self, config: MgpstrConfig): - super().__init__() - image_size = ( - config.image_size - if isinstance(config.image_size, collections.abc.Iterable) - else (config.image_size, config.image_size) - ) - patch_size = ( - config.patch_size - if isinstance(config.patch_size, collections.abc.Iterable) - else (config.patch_size, config.patch_size) - ) - self.image_size = image_size - self.patch_size = patch_size - self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) - self.num_patches = self.grid_size[0] * self.grid_size[1] - self.num_tokens = 2 if config.distilled else 1 - - self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size) - - self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) - - self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + self.num_tokens, config.hidden_size)) - self.pos_drop = nn.Dropout(p=config.drop_rate) - - def forward(self, pixel_values): - batch_size, channel, height, width = pixel_values.shape - if height != self.image_size[0] or width != self.image_size[1]: - raise ValueError( - f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." 
- ) - - patch_embeddings = self.proj(pixel_values) - patch_embeddings = patch_embeddings.flatten(2).transpose(1, 2) # BCHW -> BNC - - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - embedding_output = torch.cat((cls_tokens, patch_embeddings), dim=1) - embedding_output = embedding_output + self.pos_embed - embedding_output = self.pos_drop(embedding_output) - - return embedding_output - - -class MgpstrMlp(nn.Module): - """MLP as used in Vision Transformer, MLP-Mixer and related networks""" - - def __init__(self, config: MgpstrConfig, hidden_features): - super().__init__() - hidden_features = hidden_features or config.hidden_size - self.fc1 = nn.Linear(config.hidden_size, hidden_features) - self.act = nn.GELU() - self.fc2 = nn.Linear(hidden_features, config.hidden_size) - self.drop = nn.Dropout(config.drop_rate) - - def forward(self, hidden_states): - hidden_states = self.fc1(hidden_states) - hidden_states = self.act(hidden_states) - hidden_states = self.drop(hidden_states) - hidden_states = self.fc2(hidden_states) - hidden_states = self.drop(hidden_states) - return hidden_states - - -class MgpstrAttention(nn.Module): - def __init__(self, config: MgpstrConfig): - super().__init__() - self.num_heads = config.num_attention_heads - head_dim = config.hidden_size // config.num_attention_heads - self.scale = head_dim**-0.5 - - self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) - self.attn_drop = nn.Dropout(config.attn_drop_rate) - self.proj = nn.Linear(config.hidden_size, config.hidden_size) - self.proj_drop = nn.Dropout(config.drop_rate) - - def forward(self, hidden_states): - batch_size, num, channel = hidden_states.shape - qkv = ( - self.qkv(hidden_states) - .reshape(batch_size, num, 3, self.num_heads, channel // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - query, key, value = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attention_probs = (query @ key.transpose(-2, -1)) * self.scale - attention_probs = attention_probs.softmax(dim=-1) - attention_probs = self.attn_drop(attention_probs) - - context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, num, channel) - context_layer = self.proj(context_layer) - context_layer = self.proj_drop(context_layer) - return (context_layer, attention_probs) - - -class MgpstrLayer(nn.Module): - def __init__(self, config: MgpstrConfig, drop_path=None): - super().__init__() - self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attn = MgpstrAttention(config) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = MgpstrDropPath(drop_path) if drop_path is not None else nn.Identity() - self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - mlp_hidden_dim = int(config.hidden_size * config.mlp_ratio) - self.mlp = MgpstrMlp(config, mlp_hidden_dim) - - def forward(self, hidden_states): - self_attention_outputs = self.attn(self.norm1(hidden_states)) - attention_output = self_attention_outputs[0] - outputs = self_attention_outputs[1] - - # first residual connection - hidden_states = self.drop_path(attention_output) + hidden_states - - # second residual connection is done here - layer_output = hidden_states + self.drop_path(self.mlp(self.norm2(hidden_states))) - - outputs = (layer_output, outputs) - return outputs - - -class MgpstrEncoder(nn.Module): - def __init__(self, config: MgpstrConfig): - super().__init__() - # stochastic depth decay rule - dpr = [x.item() for x in 
torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] - - self.blocks = nn.Sequential( - *[MgpstrLayer(config=config, drop_path=dpr[i]) for i in range(config.num_hidden_layers)] - ) - - def forward(self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - - for _, blk in enumerate(self.blocks): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_outputs = blk(hidden_states) - hidden_states = layer_outputs[0] - - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - -class MgpstrA3Module(nn.Module): - def __init__(self, config: MgpstrConfig): - super().__init__() - self.token_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.tokenLearner = nn.Sequential( - nn.Conv2d(config.hidden_size, config.hidden_size, kernel_size=(1, 1), stride=1, groups=8, bias=False), - nn.Conv2d(config.hidden_size, config.max_token_length, kernel_size=(1, 1), stride=1, bias=False), - ) - self.feat = nn.Conv2d( - config.hidden_size, config.hidden_size, kernel_size=(1, 1), stride=1, groups=8, bias=False - ) - self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.token_norm(hidden_states) - hidden_states = hidden_states.transpose(1, 2).unsqueeze(-1) - selected = self.tokenLearner(hidden_states) - selected = selected.flatten(2) - attentions = F.softmax(selected, dim=-1) - - feat = self.feat(hidden_states) - feat = feat.flatten(2).transpose(1, 2) - feat = torch.einsum("...si,...id->...sd", attentions, feat) - a3_out = self.norm(feat) - - return (a3_out, attentions) - - -class MgpstrPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = MgpstrConfig - base_model_prefix = "mgp_str" - - def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: - """Initialize the weights""" - if isinstance(module, MgpstrEmbeddings): - nn.init.trunc_normal_(module.pos_embed, mean=0.0, std=self.config.initializer_range) - nn.init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range) - elif isinstance(module, (nn.Linear, nn.Conv2d)): - module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def _set_gradient_checkpointing(self, module: MgpstrEncoder, value: bool = False) -> None: - if isinstance(module, MgpstrEncoder): - module.gradient_checkpointing = value - - -MGP_STR_START_DOCSTRING = r""" - This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it - as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and - behavior. 
- - Parameters: - config ([`MgpstrConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -MGP_STR_INPUTS_DOCSTRING = r""" - Args: - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] - for details. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare MGP-STR Model transformer outputting raw hidden-states without any specific head on top.", - MGP_STR_START_DOCSTRING, -) -class MgpstrModel(MgpstrPreTrainedModel): - def __init__(self, config: MgpstrConfig): - super().__init__(config) - self.config = config - self.embeddings = MgpstrEmbeddings(config) - self.encoder = MgpstrEncoder(config) - - def get_input_embeddings(self) -> nn.Module: - return self.embeddings.proj - - @add_start_docstrings_to_model_forward(MGP_STR_INPUTS_DOCSTRING) - def forward( - self, - pixel_values: torch.FloatTensor, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if pixel_values is None: - raise ValueError("You have to specify pixel_values") - - embedding_output = self.embeddings(pixel_values) - - encoder_outputs = self.encoder( - embedding_output, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - if not return_dict: - return encoder_outputs - return BaseModelOutput( - last_hidden_state=encoder_outputs.last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings( - """ - MGP-STR Model transformer with three classification heads on top (three A^3 modules and three linear layer on top - of the transformer encoder output) for scene text recognition (STR) . 
- """, - MGP_STR_START_DOCSTRING, -) -class MgpstrForSceneTextRecognition(MgpstrPreTrainedModel): - config_class = MgpstrConfig - main_input_name = "pixel_values" - - def __init__(self, config: MgpstrConfig) -> None: - super().__init__(config) - - self.num_labels = config.num_labels - self.mgp_str = MgpstrModel(config) - - self.char_a3_module = MgpstrA3Module(config) - self.bpe_a3_module = MgpstrA3Module(config) - self.wp_a3_module = MgpstrA3Module(config) - - self.char_head = nn.Linear(config.hidden_size, config.num_character_labels) - self.bpe_head = nn.Linear(config.hidden_size, config.num_bpe_labels) - self.wp_head = nn.Linear(config.hidden_size, config.num_wordpiece_labels) - - @add_start_docstrings_to_model_forward(MGP_STR_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=MgpstrModelOutput, config_class=MgpstrConfig) - def forward( - self, - pixel_values: torch.FloatTensor, - output_attentions: Optional[bool] = None, - output_a3_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.FloatTensor], MgpstrModelOutput]: - r""" - output_a3_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of a3 modules. See `a3_attentions` under returned tensors - for more detail. - - Returns: - - Example: - - ```python - >>> from transformers import ( - ... MgpstrProcessor, - ... MgpstrForSceneTextRecognition, - ... ) - >>> import requests - >>> from PIL import Image - - >>> # load image from the IIIT-5k dataset - >>> url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png" - >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") - - >>> processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base") - >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values - - >>> model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base") - - >>> # inference - >>> outputs = model(pixel_values) - >>> out_strs = processor.batch_decode(outputs.logits) - >>> out_strs["generated_text"] - '["ticket"]' - ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - mgp_outputs = self.mgp_str( - pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = mgp_outputs[0] - - char_a3_out, char_attention = self.char_a3_module(sequence_output) - bpe_a3_out, bpe_attention = self.bpe_a3_module(sequence_output) - wp_a3_out, wp_attention = self.wp_a3_module(sequence_output) - - char_logits = self.char_head(char_a3_out) - bpe_logits = self.bpe_head(bpe_a3_out) - wp_logits = self.wp_head(wp_a3_out) - - all_a3_attentions = (char_attention, bpe_attention, wp_attention) if output_a3_attentions else None - all_logits = (char_logits, bpe_logits, wp_logits) - - if not return_dict: - outputs = (all_logits, all_a3_attentions) + mgp_outputs[1:] - return tuple(output for output in outputs if output is not None) - return MgpstrModelOutput( - logits=all_logits, - hidden_states=mgp_outputs.hidden_states, - attentions=mgp_outputs.attentions, - a3_attentions=all_a3_attentions, - ) diff --git a/spaces/yuangongfdu/whisper-at/README.md b/spaces/yuangongfdu/whisper-at/README.md deleted file mode 100644 
index aaf5bd5b45c9835e3b7bc3b8ecbf9466b9d57626..0000000000000000000000000000000000000000 --- a/spaces/yuangongfdu/whisper-at/README.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Whisper At -emoji: 🏢 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: mit ---- - -This is a demo for Whisper-AT in the paper, ["Whisper-AT: Noise-Robust Automatic Speech Recognizers are Also Strong General Audio Event Taggers"](https://arxiv.org/abs/2307.03183) - -``` -@inproceedings{gong_whisperat, - author={Gong, Yuan and Khurana, Sameer and Karlinsky, Leonid and Glass, James}, - title={Whisper-AT: Noise-Robust Automatic Speech Recognizers are Also Strong Audio Event Taggers}, - year=2023, - booktitle={Proc. Interspeech 2023} -} -``` diff --git a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/flask_api.py b/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/flask_api.py deleted file mode 100644 index dff87134620d6ec00e6c8950ccf6313946216af8..0000000000000000000000000000000000000000 --- a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/flask_api.py +++ /dev/null @@ -1,62 +0,0 @@ -import io -import logging - -import soundfile -import torch -import torchaudio -from flask import Flask, request, send_file -from flask_cors import CORS - -from inference.infer_tool import Svc, RealTimeVC - -app = Flask(__name__) - -CORS(app) - -logging.getLogger('numba').setLevel(logging.WARNING) - - -@app.route("/voiceChangeModel", methods=["POST"]) -def voice_change_model(): - request_form = request.form - wave_file = request.files.get("sample", None) - # pitch changing information - f_pitch_change = float(request_form.get("fPitchChange", 0)) - # DAW required sampling rate - daw_sample = int(float(request_form.get("sampleRate", 0))) - speaker_id = int(float(request_form.get("sSpeakId", 0))) - # get wav from http and convert - input_wav_path = io.BytesIO(wave_file.read()) - - # inference - if raw_infer: - # out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path, cluster_infer_ratio=0, - auto_predict_f0=False, noice_scale=0.4, f0_filter=False) - tar_audio = torchaudio.functional.resample(out_audio, svc_model.target_sample, daw_sample) - else: - out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path, cluster_infer_ratio=0, - auto_predict_f0=False, noice_scale=0.4, f0_filter=False) - tar_audio = torchaudio.functional.resample(torch.from_numpy(out_audio), svc_model.target_sample, daw_sample) - # return - out_wav_path = io.BytesIO() - soundfile.write(out_wav_path, tar_audio.cpu().numpy(), daw_sample, format="wav") - out_wav_path.seek(0) - return send_file(out_wav_path, download_name="temp.wav", as_attachment=True) - - -if __name__ == '__main__': - # True means splice directly. There may be explosive sounds at the splice. - # False means use cross fade. There may be slight overlapping sounds at the splice. - # Using 0.3-0.5s in VST plugin can reduce latency. - # You can adjust the maximum slicing time of VST plugin to 1 second and set it to ture here to get a stable sound quality and a relatively large delay。 - # Choose an acceptable method on your own. 
- raw_infer = True - # each model and config are corresponding - model_name = "logs/32k/G_174000-Copy1.pth" - config_name = "configs/config.json" - cluster_model_path = "logs/44k/kmeans_10000.pt" - svc_model = Svc(model_name, config_name, cluster_model_path=cluster_model_path) - svc = RealTimeVC() - # corresponding to the vst plugin here - app.run(port=6842, host="0.0.0.0", debug=False, threaded=False) diff --git a/spaces/zenafey/fast-stable-diffusion/inference.py b/spaces/zenafey/fast-stable-diffusion/inference.py deleted file mode 100644 index ec333eb410c0ede6c70553c4b8a955b7b197ab51..0000000000000000000000000000000000000000 --- a/spaces/zenafey/fast-stable-diffusion/inference.py +++ /dev/null @@ -1,97 +0,0 @@ -from prodiapy import Custom -from prodiapy.util import load -from PIL import Image -from threading import Thread -from utils import image_to_base64 -import gradio as gr -import gradio_user_history as gr_user_history -import os - -pipe = Custom(os.getenv("PRODIA_API_KEY")) - - -def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed, batch_count, profile: gr.OAuthProfile | None): - total_images = [] - threads = [] - - def generate_one_image(): - result = pipe.create( - "/sd/generate", - prompt=prompt, - negative_prompt=negative_prompt, - model=model, - steps=steps, - cfg_scale=cfg_scale, - sampler=sampler, - width=width, - height=height, - seed=seed - ) - job = pipe.wait_for(result) - total_images.append(job['imageUrl']) - - for x in range(batch_count): - t = Thread(target=generate_one_image) - threads.append(t) - t.start() - - for t in threads: - t.join() - - for image in total_images: - gr_user_history.save_image(label=prompt, image=Image.open(load(image)), profile=profile) - - return gr.update(value=total_images, preview=False) - - -def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed, - batch_count): - if input_image is None: - return - - total_images = [] - threads = [] - - def generate_one_image(): - result = pipe.create( - "/sd/transform", - imageData=image_to_base64(input_image), - denoising_strength=denoising, - prompt=prompt, - negative_prompt=negative_prompt, - model=model, - steps=steps, - cfg_scale=cfg_scale, - sampler=sampler, - width=width, - height=height, - seed=seed - - ) - job = pipe.wait_for(result) - total_images.append(job['imageUrl']) - - for x in range(batch_count): - t = Thread(target=generate_one_image) - threads.append(t) - t.start() - - for t in threads: - t.join() - - return gr.update(value=total_images, preview=False) - - -def upscale(image, scale, profile: gr.OAuthProfile | None): - if image is None: - return - - job = pipe.create( - '/upscale', - imageData=image_to_base64(image), - resize=scale - ) - image = pipe.wait_for(job)['imageUrl'] - gr_user_history.save_image(label=f'upscale by {scale}', image=Image.open(load(image)), profile=profile) - - return image diff --git a/spaces/zhang-wei-jian/docker/node_modules/cookies/README.md b/spaces/zhang-wei-jian/docker/node_modules/cookies/README.md deleted file mode 100644 index f7c12f8d2128a31bdbf467f4c2b1fa48380c9711..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/cookies/README.md +++ /dev/null @@ -1,145 +0,0 @@ -Cookies -======= - -[![NPM Version][npm-image]][npm-url] -[![NPM Downloads][downloads-image]][downloads-url] -[![Node.js Version][node-version-image]][node-version-url] -[![Build Status][travis-image]][travis-url] -[![Test 
Coverage][coveralls-image]][coveralls-url] - -Cookies is a [node.js](http://nodejs.org/) module for getting and setting HTTP(S) cookies. Cookies can be signed to prevent tampering, using [Keygrip](https://www.npmjs.com/package/keygrip). It can be used with the built-in node.js HTTP library, or as Connect/Express middleware. - -## Install - -This is a [Node.js](https://nodejs.org/en/) module available through the -[npm registry](https://www.npmjs.com/). Installation is done using the -[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally): - -``` -$ npm install cookies -``` - -## Features - -* **Lazy**: Since cookie verification against multiple keys could be expensive, cookies are only verified lazily when accessed, not eagerly on each request. - -* **Secure**: All cookies are `httponly` by default, and cookies sent over SSL are `secure` by default. An error will be thrown if you try to send secure cookies over an insecure socket. - -* **Unobtrusive**: Signed cookies are stored the same way as unsigned cookies, instead of in an obfuscated signing format. An additional signature cookie is stored for each signed cookie, using a standard naming convention (_cookie-name_`.sig`). This allows other libraries to access the original cookies without having to know the signing mechanism. - -* **Agnostic**: This library is optimized for use with [Keygrip](https://www.npmjs.com/package/keygrip), but does not require it; you can implement your own signing scheme instead if you like and use this library only to read/write cookies. Factoring the signing into a separate library encourages code reuse and allows you to use the same signing library for other areas where signing is needed, such as in URLs. - -## API - -### cookies = new Cookies( request, response, [ options ] ) - -This creates a cookie jar corresponding to the current _request_ and _response_, additionally passing an object _options_. - -A [Keygrip](https://www.npmjs.com/package/keygrip) object or an array of keys can optionally be passed as _options.keys_ to enable cryptographic signing based on SHA1 HMAC, using rotated credentials. - -A Boolean can optionally be passed as _options.secure_ to explicitally specify if the connection is secure, rather than this module examining _request_. - -Note that since this only saves parameters without any other processing, it is very lightweight. Cookies are only parsed on demand when they are accessed. - -### express.createServer( Cookies.express( keys ) ) - -This adds cookie support as a Connect middleware layer for use in Express apps, allowing inbound cookies to be read using `req.cookies.get` and outbound cookies to be set using `res.cookies.set`. - -### cookies.get( name, [ options ] ) - -This extracts the cookie with the given name from the `Cookie` header in the request. If such a cookie exists, its value is returned. Otherwise, nothing is returned. - -`{ signed: true }` can optionally be passed as the second parameter _options_. In this case, a signature cookie (a cookie of same name ending with the `.sig` suffix appended) is fetched. If no such cookie exists, nothing is returned. - -If the signature cookie _does_ exist, the provided [Keygrip](https://www.npmjs.com/package/keygrip) object is used to check whether the hash of _cookie-name_=_cookie-value_ matches that of any registered key: - -* If the signature cookie hash matches the first key, the original cookie value is returned. 
-* If the signature cookie hash matches any other key, the original cookie value is returned AND an outbound header is set to update the signature cookie's value to the hash of the first key. This enables automatic freshening of signature cookies that have become stale due to key rotation. -* If the signature cookie hash does not match any key, nothing is returned, and an outbound header with an expired date is used to delete the cookie. - -### cookies.set( name, [ value ], [ options ] ) - -This sets the given cookie in the response and returns the current context to allow chaining. - -If the _value_ is omitted, an outbound header with an expired date is used to delete the cookie. - -If the _options_ object is provided, it will be used to generate the outbound cookie header as follows: - -* `maxAge`: a number representing the milliseconds from `Date.now()` for expiry -* `expires`: a `Date` object indicating the cookie's expiration date (expires at the end of session by default). -* `path`: a string indicating the path of the cookie (`/` by default). -* `domain`: a string indicating the domain of the cookie (no default). -* `secure`: a boolean indicating whether the cookie is only to be sent over HTTPS (`false` by default for HTTP, `true` by default for HTTPS). [Read more about this option below](#secure-cookies). -* `httpOnly`: a boolean indicating whether the cookie is only to be sent over HTTP(S), and not made available to client JavaScript (`true` by default). -* `sameSite`: a boolean or string indicating whether the cookie is a "same site" cookie (`false` by default). This can be set to `'strict'`, `'lax'`, or `true` (which maps to `'strict'`). -* `signed`: a boolean indicating whether the cookie is to be signed (`false` by default). If this is true, another cookie of the same name with the `.sig` suffix appended will also be sent, with a 27-byte url-safe base64 SHA1 value representing the hash of _cookie-name_=_cookie-value_ against the first [Keygrip](https://www.npmjs.com/package/keygrip) key. This signature key is used to detect tampering the next time a cookie is received. -* `overwrite`: a boolean indicating whether to overwrite previously set cookies of the same name (`false` by default). If this is true, all cookies set during the same request with the same name (regardless of path or domain) are filtered out of the Set-Cookie header when setting this cookie. - -### Secure cookies - -To send a secure cookie, you set a cookie with the `secure: true` option. - -HTTPS is necessary for secure cookies. When `cookies.set` is called with `secure: true` and a secure connection is not detected, the cookie will not be set and an error will be thrown. - -This module will test each request to see if it's secure by checking: - -* if the `protocol` property of the request is set to `https`, or -* if the `connection.encrypted` property of the request is set to `true`. - -If your server is running behind a proxy and you are using `secure: true`, you need to configure your server to read the request headers added by your proxy to determine whether the request is using a secure connection. 
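
As a rough sketch of how the options above fit together (assuming an Express 4 app sitting behind a TLS-terminating reverse proxy; the signing key, route, and cookie names here are placeholders, not part of this module):

```js
var express = require('express')
var Cookies = require('cookies')

var app = express()

// Trust the first proxy hop so X-Forwarded-Proto marks the request as secure
app.set('trust proxy', 1)

// Mount the Connect/Express middleware with signing keys
app.use(Cookies.express(['keyboard cat']))

app.get('/profile', function (req, res) {
  // Read a signed cookie; nothing is returned if it is missing or tampered with
  var theme = req.cookies.get('theme', { signed: true })

  // Set a signed, secure, same-site cookie that expires in one hour
  res.cookies.set('theme', theme || 'dark', {
    signed: true,
    secure: true,
    sameSite: 'lax',
    maxAge: 60 * 60 * 1000
  })

  res.end('theme: ' + (theme || 'dark'))
})

app.listen(3000)
```

Without the trust proxy setting, the `secure: true` write in this sketch would fail on a plain HTTP connection to the app, as described above.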
- -For more information about working behind proxies, consult the framework you are using: - -* For Koa - [`app.proxy = true`](http://koajs.com/#settings) -* For Express - [trust proxy setting](http://expressjs.com/en/4x/api.html#trust.proxy.options.table) - -If your Koa or Express server is properly configured, the `protocol` property of the request will be set to match the protocol reported by the proxy in the `X-Forwarded-Proto` header. - -## Example - -```js -var http = require('http') -var Cookies = require('cookies') - -// Optionally define keys to sign cookie values -// to prevent client tampering -var keys = ['keyboard cat'] - -var server = http.createServer(function (req, res) { - // Create a cookies object - var cookies = new Cookies(req, res, { keys: keys }) - - // Get a cookie - var lastVisit = cookies.get('LastVisit', { signed: true }) - - // Set the cookie to a value - cookies.set('LastVisit', new Date().toISOString(), { signed: true }) - - if (!lastVisit) { - res.setHeader('Content-Type', 'text/plain') - res.end('Welcome, first time visitor!') - } else { - res.setHeader('Content-Type', 'text/plain') - res.end('Welcome back! Nothing much changed since your last visit at ' + lastVisit + '.') - } -}) - -server.listen(3000, function () { - console.log('Visit us at http://127.0.0.1:3000/ !') -}) -``` - -## License - -[MIT](LICENSE) - -[npm-image]: https://img.shields.io/npm/v/cookies.svg -[npm-url]: https://npmjs.org/package/cookies -[coveralls-image]: https://img.shields.io/coveralls/pillarjs/cookies/master.svg -[coveralls-url]: https://coveralls.io/r/pillarjs/cookies?branch=master -[downloads-image]: https://img.shields.io/npm/dm/cookies.svg -[downloads-url]: https://npmjs.org/package/cookies -[node-version-image]: https://img.shields.io/node/v/cookies.svg -[node-version-url]: https://nodejs.org/en/download/ -[travis-image]: https://img.shields.io/travis/pillarjs/cookies/master.svg -[travis-url]: https://travis-ci.org/pillarjs/cookies diff --git a/spaces/zhang-wei-jian/docker/node_modules/depd/index.js b/spaces/zhang-wei-jian/docker/node_modules/depd/index.js deleted file mode 100644 index 1bf2fcfdeffc984e5ad792eec08744c29d4a4590..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/depd/index.js +++ /dev/null @@ -1,538 +0,0 @@ -/*! - * depd - * Copyright(c) 2014-2018 Douglas Christopher Wilson - * MIT Licensed - */ - -/** - * Module dependencies. - */ - -var relative = require('path').relative - -/** - * Module exports. - */ - -module.exports = depd - -/** - * Get the path to base files on. - */ - -var basePath = process.cwd() - -/** - * Determine if namespace is contained in the string. - */ - -function containsNamespace (str, namespace) { - var vals = str.split(/[ ,]+/) - var ns = String(namespace).toLowerCase() - - for (var i = 0; i < vals.length; i++) { - var val = vals[i] - - // namespace contained - if (val && (val === '*' || val.toLowerCase() === ns)) { - return true - } - } - - return false -} - -/** - * Convert a data descriptor to accessor descriptor. 
- */ - -function convertDataDescriptorToAccessor (obj, prop, message) { - var descriptor = Object.getOwnPropertyDescriptor(obj, prop) - var value = descriptor.value - - descriptor.get = function getter () { return value } - - if (descriptor.writable) { - descriptor.set = function setter (val) { return (value = val) } - } - - delete descriptor.value - delete descriptor.writable - - Object.defineProperty(obj, prop, descriptor) - - return descriptor -} - -/** - * Create arguments string to keep arity. - */ - -function createArgumentsString (arity) { - var str = '' - - for (var i = 0; i < arity; i++) { - str += ', arg' + i - } - - return str.substr(2) -} - -/** - * Create stack string from stack. - */ - -function createStackString (stack) { - var str = this.name + ': ' + this.namespace - - if (this.message) { - str += ' deprecated ' + this.message - } - - for (var i = 0; i < stack.length; i++) { - str += '\n at ' + stack[i].toString() - } - - return str -} - -/** - * Create deprecate for namespace in caller. - */ - -function depd (namespace) { - if (!namespace) { - throw new TypeError('argument namespace is required') - } - - var stack = getStack() - var site = callSiteLocation(stack[1]) - var file = site[0] - - function deprecate (message) { - // call to self as log - log.call(deprecate, message) - } - - deprecate._file = file - deprecate._ignored = isignored(namespace) - deprecate._namespace = namespace - deprecate._traced = istraced(namespace) - deprecate._warned = Object.create(null) - - deprecate.function = wrapfunction - deprecate.property = wrapproperty - - return deprecate -} - -/** - * Determine if event emitter has listeners of a given type. - * - * The way to do this check is done three different ways in Node.js >= 0.8 - * so this consolidates them into a minimal set using instance methods. - * - * @param {EventEmitter} emitter - * @param {string} type - * @returns {boolean} - * @private - */ - -function eehaslisteners (emitter, type) { - var count = typeof emitter.listenerCount !== 'function' - ? emitter.listeners(type).length - : emitter.listenerCount(type) - - return count > 0 -} - -/** - * Determine if namespace is ignored. - */ - -function isignored (namespace) { - if (process.noDeprecation) { - // --no-deprecation support - return true - } - - var str = process.env.NO_DEPRECATION || '' - - // namespace ignored - return containsNamespace(str, namespace) -} - -/** - * Determine if namespace is traced. - */ - -function istraced (namespace) { - if (process.traceDeprecation) { - // --trace-deprecation support - return true - } - - var str = process.env.TRACE_DEPRECATION || '' - - // namespace traced - return containsNamespace(str, namespace) -} - -/** - * Display deprecation message. 
- */ - -function log (message, site) { - var haslisteners = eehaslisteners(process, 'deprecation') - - // abort early if no destination - if (!haslisteners && this._ignored) { - return - } - - var caller - var callFile - var callSite - var depSite - var i = 0 - var seen = false - var stack = getStack() - var file = this._file - - if (site) { - // provided site - depSite = site - callSite = callSiteLocation(stack[1]) - callSite.name = depSite.name - file = callSite[0] - } else { - // get call site - i = 2 - depSite = callSiteLocation(stack[i]) - callSite = depSite - } - - // get caller of deprecated thing in relation to file - for (; i < stack.length; i++) { - caller = callSiteLocation(stack[i]) - callFile = caller[0] - - if (callFile === file) { - seen = true - } else if (callFile === this._file) { - file = this._file - } else if (seen) { - break - } - } - - var key = caller - ? depSite.join(':') + '__' + caller.join(':') - : undefined - - if (key !== undefined && key in this._warned) { - // already warned - return - } - - this._warned[key] = true - - // generate automatic message from call site - var msg = message - if (!msg) { - msg = callSite === depSite || !callSite.name - ? defaultMessage(depSite) - : defaultMessage(callSite) - } - - // emit deprecation if listeners exist - if (haslisteners) { - var err = DeprecationError(this._namespace, msg, stack.slice(i)) - process.emit('deprecation', err) - return - } - - // format and write message - var format = process.stderr.isTTY - ? formatColor - : formatPlain - var output = format.call(this, msg, caller, stack.slice(i)) - process.stderr.write(output + '\n', 'utf8') -} - -/** - * Get call site location as array. - */ - -function callSiteLocation (callSite) { - var file = callSite.getFileName() || '' - var line = callSite.getLineNumber() - var colm = callSite.getColumnNumber() - - if (callSite.isEval()) { - file = callSite.getEvalOrigin() + ', ' + file - } - - var site = [file, line, colm] - - site.callSite = callSite - site.name = callSite.getFunctionName() - - return site -} - -/** - * Generate a default message from the site. - */ - -function defaultMessage (site) { - var callSite = site.callSite - var funcName = site.name - - // make useful anonymous name - if (!funcName) { - funcName = '' - } - - var context = callSite.getThis() - var typeName = context && callSite.getTypeName() - - // ignore useless type name - if (typeName === 'Object') { - typeName = undefined - } - - // make useful type name - if (typeName === 'Function') { - typeName = context.name || typeName - } - - return typeName && callSite.getMethodName() - ? typeName + '.' + funcName - : funcName -} - -/** - * Format deprecation message without color. - */ - -function formatPlain (msg, caller, stack) { - var timestamp = new Date().toUTCString() - - var formatted = timestamp + - ' ' + this._namespace + - ' deprecated ' + msg - - // add stack trace - if (this._traced) { - for (var i = 0; i < stack.length; i++) { - formatted += '\n at ' + stack[i].toString() - } - - return formatted - } - - if (caller) { - formatted += ' at ' + formatLocation(caller) - } - - return formatted -} - -/** - * Format deprecation message with color. 
- */ - -function formatColor (msg, caller, stack) { - var formatted = '\x1b[36;1m' + this._namespace + '\x1b[22;39m' + // bold cyan - ' \x1b[33;1mdeprecated\x1b[22;39m' + // bold yellow - ' \x1b[0m' + msg + '\x1b[39m' // reset - - // add stack trace - if (this._traced) { - for (var i = 0; i < stack.length; i++) { - formatted += '\n \x1b[36mat ' + stack[i].toString() + '\x1b[39m' // cyan - } - - return formatted - } - - if (caller) { - formatted += ' \x1b[36m' + formatLocation(caller) + '\x1b[39m' // cyan - } - - return formatted -} - -/** - * Format call site location. - */ - -function formatLocation (callSite) { - return relative(basePath, callSite[0]) + - ':' + callSite[1] + - ':' + callSite[2] -} - -/** - * Get the stack as array of call sites. - */ - -function getStack () { - var limit = Error.stackTraceLimit - var obj = {} - var prep = Error.prepareStackTrace - - Error.prepareStackTrace = prepareObjectStackTrace - Error.stackTraceLimit = Math.max(10, limit) - - // capture the stack - Error.captureStackTrace(obj) - - // slice this function off the top - var stack = obj.stack.slice(1) - - Error.prepareStackTrace = prep - Error.stackTraceLimit = limit - - return stack -} - -/** - * Capture call site stack from v8. - */ - -function prepareObjectStackTrace (obj, stack) { - return stack -} - -/** - * Return a wrapped function in a deprecation message. - */ - -function wrapfunction (fn, message) { - if (typeof fn !== 'function') { - throw new TypeError('argument fn must be a function') - } - - var args = createArgumentsString(fn.length) - var stack = getStack() - var site = callSiteLocation(stack[1]) - - site.name = fn.name - - // eslint-disable-next-line no-new-func - var deprecatedfn = new Function('fn', 'log', 'deprecate', 'message', 'site', - '"use strict"\n' + - 'return function (' + args + ') {' + - 'log.call(deprecate, message, site)\n' + - 'return fn.apply(this, arguments)\n' + - '}')(fn, log, this, message, site) - - return deprecatedfn -} - -/** - * Wrap property in a deprecation message. 
- */ - -function wrapproperty (obj, prop, message) { - if (!obj || (typeof obj !== 'object' && typeof obj !== 'function')) { - throw new TypeError('argument obj must be object') - } - - var descriptor = Object.getOwnPropertyDescriptor(obj, prop) - - if (!descriptor) { - throw new TypeError('must call property on owner object') - } - - if (!descriptor.configurable) { - throw new TypeError('property must be configurable') - } - - var deprecate = this - var stack = getStack() - var site = callSiteLocation(stack[1]) - - // set site name - site.name = prop - - // convert data descriptor - if ('value' in descriptor) { - descriptor = convertDataDescriptorToAccessor(obj, prop, message) - } - - var get = descriptor.get - var set = descriptor.set - - // wrap getter - if (typeof get === 'function') { - descriptor.get = function getter () { - log.call(deprecate, message, site) - return get.apply(this, arguments) - } - } - - // wrap setter - if (typeof set === 'function') { - descriptor.set = function setter () { - log.call(deprecate, message, site) - return set.apply(this, arguments) - } - } - - Object.defineProperty(obj, prop, descriptor) -} - -/** - * Create DeprecationError for deprecation - */ - -function DeprecationError (namespace, message, stack) { - var error = new Error() - var stackString - - Object.defineProperty(error, 'constructor', { - value: DeprecationError - }) - - Object.defineProperty(error, 'message', { - configurable: true, - enumerable: false, - value: message, - writable: true - }) - - Object.defineProperty(error, 'name', { - enumerable: false, - configurable: true, - value: 'DeprecationError', - writable: true - }) - - Object.defineProperty(error, 'namespace', { - configurable: true, - enumerable: false, - value: namespace, - writable: true - }) - - Object.defineProperty(error, 'stack', { - configurable: true, - enumerable: false, - get: function () { - if (stackString !== undefined) { - return stackString - } - - // prepare stack trace - return (stackString = createStackString.call(this, stack)) - }, - set: function setter (val) { - stackString = val - } - }) - - return error -} diff --git a/spaces/zhang-wei-jian/docker/node_modules/on-finished/index.js b/spaces/zhang-wei-jian/docker/node_modules/on-finished/index.js deleted file mode 100644 index e68df7bde39bb47aa1fd7eff4b317b3969de75bd..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/on-finished/index.js +++ /dev/null @@ -1,234 +0,0 @@ -/*! - * on-finished - * Copyright(c) 2013 Jonathan Ong - * Copyright(c) 2014 Douglas Christopher Wilson - * MIT Licensed - */ - -'use strict' - -/** - * Module exports. - * @public - */ - -module.exports = onFinished -module.exports.isFinished = isFinished - -/** - * Module dependencies. - * @private - */ - -var asyncHooks = tryRequireAsyncHooks() -var first = require('ee-first') - -/** - * Variables. - * @private - */ - -/* istanbul ignore next */ -var defer = typeof setImmediate === 'function' - ? setImmediate - : function (fn) { process.nextTick(fn.bind.apply(fn, arguments)) } - -/** - * Invoke callback when the response has finished, useful for - * cleaning up resources afterwards. - * - * @param {object} msg - * @param {function} listener - * @return {object} - * @public - */ - -function onFinished (msg, listener) { - if (isFinished(msg) !== false) { - defer(listener, null, msg) - return msg - } - - // attach the listener to the message - attachListener(msg, wrap(listener)) - - return msg -} - -/** - * Determine if message is already finished. 
- * - * @param {object} msg - * @return {boolean} - * @public - */ - -function isFinished (msg) { - var socket = msg.socket - - if (typeof msg.finished === 'boolean') { - // OutgoingMessage - return Boolean(msg.finished || (socket && !socket.writable)) - } - - if (typeof msg.complete === 'boolean') { - // IncomingMessage - return Boolean(msg.upgrade || !socket || !socket.readable || (msg.complete && !msg.readable)) - } - - // don't know - return undefined -} - -/** - * Attach a finished listener to the message. - * - * @param {object} msg - * @param {function} callback - * @private - */ - -function attachFinishedListener (msg, callback) { - var eeMsg - var eeSocket - var finished = false - - function onFinish (error) { - eeMsg.cancel() - eeSocket.cancel() - - finished = true - callback(error) - } - - // finished on first message event - eeMsg = eeSocket = first([[msg, 'end', 'finish']], onFinish) - - function onSocket (socket) { - // remove listener - msg.removeListener('socket', onSocket) - - if (finished) return - if (eeMsg !== eeSocket) return - - // finished on first socket event - eeSocket = first([[socket, 'error', 'close']], onFinish) - } - - if (msg.socket) { - // socket already assigned - onSocket(msg.socket) - return - } - - // wait for socket to be assigned - msg.on('socket', onSocket) - - if (msg.socket === undefined) { - // istanbul ignore next: node.js 0.8 patch - patchAssignSocket(msg, onSocket) - } -} - -/** - * Attach the listener to the message. - * - * @param {object} msg - * @return {function} - * @private - */ - -function attachListener (msg, listener) { - var attached = msg.__onFinished - - // create a private single listener with queue - if (!attached || !attached.queue) { - attached = msg.__onFinished = createListener(msg) - attachFinishedListener(msg, attached) - } - - attached.queue.push(listener) -} - -/** - * Create listener on message. - * - * @param {object} msg - * @return {function} - * @private - */ - -function createListener (msg) { - function listener (err) { - if (msg.__onFinished === listener) msg.__onFinished = null - if (!listener.queue) return - - var queue = listener.queue - listener.queue = null - - for (var i = 0; i < queue.length; i++) { - queue[i](err, msg) - } - } - - listener.queue = [] - - return listener -} - -/** - * Patch ServerResponse.prototype.assignSocket for node.js 0.8. - * - * @param {ServerResponse} res - * @param {function} callback - * @private - */ - -// istanbul ignore next: node.js 0.8 patch -function patchAssignSocket (res, callback) { - var assignSocket = res.assignSocket - - if (typeof assignSocket !== 'function') return - - // res.on('socket', callback) is broken in 0.8 - res.assignSocket = function _assignSocket (socket) { - assignSocket.call(this, socket) - callback(socket) - } -} - -/** - * Try to require async_hooks - * @private - */ - -function tryRequireAsyncHooks () { - try { - return require('async_hooks') - } catch (e) { - return {} - } -} - -/** - * Wrap function with async resource, if possible. - * AsyncResource.bind static method backported. 
- * @private - */ - -function wrap (fn) { - var res - - // create anonymous resource - if (asyncHooks.AsyncResource) { - res = new asyncHooks.AsyncResource(fn.name || 'bound-anonymous-fn') - } - - // incompatible node.js - if (!res || !res.runInAsyncScope) { - return fn - } - - // return bound function - return res.runInAsyncScope.bind(res, fn, null) -} diff --git a/spaces/zhaoys/wfms-kuiwenc/src/components/ui/input.tsx b/spaces/zhaoys/wfms-kuiwenc/src/components/ui/input.tsx deleted file mode 100644 index 684a857f3d769b78818fb13de1abaebfb09ca79c..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/src/components/ui/input.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import * as React from 'react' - -import { cn } from '@/lib/utils' - -export interface InputProps - extends React.InputHTMLAttributes {} - -const Input = React.forwardRef( - ({ className, type, ...props }, ref) => { - return ( - - ) - } -) -Input.displayName = 'Input' - -export { Input } diff --git a/spaces/zideliu/styledrop/timm/models/layers/selective_kernel.py b/spaces/zideliu/styledrop/timm/models/layers/selective_kernel.py deleted file mode 100644 index 10bfd0e0d4e1c6e4dce1e69305ff990ddf85cb6f..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/models/layers/selective_kernel.py +++ /dev/null @@ -1,118 +0,0 @@ -""" Selective Kernel Convolution/Attention - -Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) - -Hacked together by / Copyright 2020 Ross Wightman -""" -import torch -from torch import nn as nn - -from .conv_bn_act import ConvBnAct - - -def _kernel_valid(k): - if isinstance(k, (list, tuple)): - for ki in k: - return _kernel_valid(ki) - assert k >= 3 and k % 2 - - -class SelectiveKernelAttn(nn.Module): - def __init__(self, channels, num_paths=2, attn_channels=32, - act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): - """ Selective Kernel Attention Module - - Selective Kernel attention mechanism factored out into its own module. - - """ - super(SelectiveKernelAttn, self).__init__() - self.num_paths = num_paths - self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) - self.bn = norm_layer(attn_channels) - self.act = act_layer(inplace=True) - self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) - - def forward(self, x): - assert x.shape[1] == self.num_paths - x = x.sum(1).mean((2, 3), keepdim=True) - x = self.fc_reduce(x) - x = self.bn(x) - x = self.act(x) - x = self.fc_select(x) - B, C, H, W = x.shape - x = x.view(B, self.num_paths, C // self.num_paths, H, W) - x = torch.softmax(x, dim=1) - return x - - -class SelectiveKernelConv(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size=None, stride=1, dilation=1, groups=1, - attn_reduction=16, min_attn_channels=32, keep_3x3=True, split_input=False, - drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None): - """ Selective Kernel Convolution Module - - As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. - - Largest change is the input split, which divides the input channels across each convolution path, this can - be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps - the parameter count from ballooning when the convolutions themselves don't have groups, but still provides - a noteworthy increase in performance over similar param count models without this attention layer. 
-Ross W - - Args: - in_channels (int): module input (feature) channel count - out_channels (int): module output (feature) channel count - kernel_size (int, list): kernel size for each convolution branch - stride (int): stride for convolutions - dilation (int): dilation for module as a whole, impacts dilation of each branch - groups (int): number of groups for each branch - attn_reduction (int, float): reduction factor for attention features - min_attn_channels (int): minimum attention feature channels - keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations - split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, - can be viewed as grouping by path, output expands to module out_channels count - drop_block (nn.Module): drop block module - act_layer (nn.Module): activation layer to use - norm_layer (nn.Module): batchnorm/norm layer to use - """ - super(SelectiveKernelConv, self).__init__() - kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation - _kernel_valid(kernel_size) - if not isinstance(kernel_size, list): - kernel_size = [kernel_size] * 2 - if keep_3x3: - dilation = [dilation * (k - 1) // 2 for k in kernel_size] - kernel_size = [3] * len(kernel_size) - else: - dilation = [dilation] * len(kernel_size) - self.num_paths = len(kernel_size) - self.in_channels = in_channels - self.out_channels = out_channels - self.split_input = split_input - if self.split_input: - assert in_channels % self.num_paths == 0 - in_channels = in_channels // self.num_paths - groups = min(out_channels, groups) - - conv_kwargs = dict( - stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, - aa_layer=aa_layer) - self.paths = nn.ModuleList([ - ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) - for k, d in zip(kernel_size, dilation)]) - - attn_channels = max(int(out_channels / attn_reduction), min_attn_channels) - self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) - self.drop_block = drop_block - - def forward(self, x): - if self.split_input: - x_split = torch.split(x, self.in_channels // self.num_paths, 1) - x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] - else: - x_paths = [op(x) for op in self.paths] - x = torch.stack(x_paths, dim=1) - x_attn = self.attn(x) - x = x * x_attn - x = torch.sum(x, dim=1) - return x diff --git a/spaces/zideliu/styledrop/timm/models/senet.py b/spaces/zideliu/styledrop/timm/models/senet.py deleted file mode 100644 index 8073229a721930e279f07a46590eda71d5756904..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/models/senet.py +++ /dev/null @@ -1,465 +0,0 @@ -""" -SEResNet implementation from Cadene's pretrained models -https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py -Additional credit to https://github.com/creafz - -Original model: https://github.com/hujie-frank/SENet - -ResNet code gently borrowed from -https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py - -FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate -support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. 
-""" -import math -from collections import OrderedDict - -import torch.nn as nn -import torch.nn.functional as F - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .layers import create_classifier -from .registry import register_model - -__all__ = ['SENet'] - - -def _cfg(url='', **kwargs): - return { - 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', - **kwargs - } - - -default_cfgs = { - 'legacy_senet154': - _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'), - 'legacy_seresnet18': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', - interpolation='bicubic'), - 'legacy_seresnet34': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), - 'legacy_seresnet50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), - 'legacy_seresnet101': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), - 'legacy_seresnet152': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), - 'legacy_seresnext26_32x4d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', - interpolation='bicubic'), - 'legacy_seresnext50_32x4d': - _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'), - 'legacy_seresnext101_32x4d': - _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'), -} - - -def _weight_init(m): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1.) - nn.init.constant_(m.bias, 0.) - - -class SEModule(nn.Module): - - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) - self.relu = nn.ReLU(inplace=True) - self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - module_input = x - x = x.mean((2, 3), keepdim=True) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class Bottleneck(nn.Module): - """ - Base class for bottlenecks that implements `forward()` method. - """ - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out = self.se_module(out) + residual - out = self.relu(out) - - return out - - -class SEBottleneck(Bottleneck): - """ - Bottleneck for SENet154. 
- """ - expansion = 4 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, - downsample=None): - super(SEBottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes * 2) - self.conv2 = nn.Conv2d( - planes * 2, planes * 4, kernel_size=3, stride=stride, - padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(planes * 4) - self.conv3 = nn.Conv2d( - planes * 4, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNetBottleneck(Bottleneck): - """ - ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe - implementation and uses `stride=stride` in `conv1` and not in `conv2` - (the latter is used in the torchvision implementation of ResNet). - """ - expansion = 4 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, - downsample=None): - super(SEResNetBottleneck, self).__init__() - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=1, bias=False, stride=stride) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNeXtBottleneck(Bottleneck): - """ - ResNeXt bottleneck type C with a Squeeze-and-Excitation module. - """ - expansion = 4 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, - downsample=None, base_width=4): - super(SEResNeXtBottleneck, self).__init__() - width = math.floor(planes * (base_width / 64)) * groups - self.conv1 = nn.Conv2d( - inplanes, width, kernel_size=1, bias=False, stride=1) - self.bn1 = nn.BatchNorm2d(width) - self.conv2 = nn.Conv2d( - width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(width) - self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNetBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): - super(SEResNetBlock, self).__init__() - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes, reduction=reduction) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out = self.se_module(out) + residual - out = self.relu(out) - - return out - - -class SENet(nn.Module): - - def __init__(self, block, layers, groups, reduction, drop_rate=0.2, - in_chans=3, 
inplanes=64, input_3x3=False, downsample_kernel_size=1, - downsample_padding=0, num_classes=1000, global_pool='avg'): - """ - Parameters - ---------- - block (nn.Module): Bottleneck class. - - For SENet154: SEBottleneck - - For SE-ResNet models: SEResNetBottleneck - - For SE-ResNeXt models: SEResNeXtBottleneck - layers (list of ints): Number of residual blocks for 4 layers of the - network (layer1...layer4). - groups (int): Number of groups for the 3x3 convolution in each - bottleneck block. - - For SENet154: 64 - - For SE-ResNet models: 1 - - For SE-ResNeXt models: 32 - reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - - For all models: 16 - dropout_p (float or None): Drop probability for the Dropout layer. - If `None` the Dropout layer is not used. - - For SENet154: 0.2 - - For SE-ResNet models: None - - For SE-ResNeXt models: None - inplanes (int): Number of input channels for layer1. - - For SENet154: 128 - - For SE-ResNet models: 64 - - For SE-ResNeXt models: 64 - input_3x3 (bool): If `True`, use three 3x3 convolutions instead of - a single 7x7 convolution in layer0. - - For SENet154: True - - For SE-ResNet models: False - - For SE-ResNeXt models: False - downsample_kernel_size (int): Kernel size for downsampling convolutions - in layer2, layer3 and layer4. - - For SENet154: 3 - - For SE-ResNet models: 1 - - For SE-ResNeXt models: 1 - downsample_padding (int): Padding for downsampling convolutions in - layer2, layer3 and layer4. - - For SENet154: 1 - - For SE-ResNet models: 0 - - For SE-ResNeXt models: 0 - num_classes (int): Number of outputs in `last_linear` layer. - - For all models: 1000 - """ - super(SENet, self).__init__() - self.inplanes = inplanes - self.num_classes = num_classes - self.drop_rate = drop_rate - if input_3x3: - layer0_modules = [ - ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), - ('bn1', nn.BatchNorm2d(64)), - ('relu1', nn.ReLU(inplace=True)), - ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), - ('bn2', nn.BatchNorm2d(64)), - ('relu2', nn.ReLU(inplace=True)), - ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), - ('bn3', nn.BatchNorm2d(inplanes)), - ('relu3', nn.ReLU(inplace=True)), - ] - else: - layer0_modules = [ - ('conv1', nn.Conv2d( - in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), - ('bn1', nn.BatchNorm2d(inplanes)), - ('relu1', nn.ReLU(inplace=True)), - ] - self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) - # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. 
- self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) - self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] - self.layer1 = self._make_layer( - block, - planes=64, - blocks=layers[0], - groups=groups, - reduction=reduction, - downsample_kernel_size=1, - downsample_padding=0 - ) - self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] - self.layer2 = self._make_layer( - block, - planes=128, - blocks=layers[1], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] - self.layer3 = self._make_layer( - block, - planes=256, - blocks=layers[2], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] - self.layer4 = self._make_layer( - block, - planes=512, - blocks=layers[3], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] - self.num_features = 512 * block.expansion - self.global_pool, self.last_linear = create_classifier( - self.num_features, self.num_classes, pool_type=global_pool) - - for m in self.modules(): - _weight_init(m) - - def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, - downsample_kernel_size=1, downsample_padding=0): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, - stride=stride, padding=downsample_padding, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups, reduction)) - - return nn.Sequential(*layers) - - def get_classifier(self): - return self.last_linear - - def reset_classifier(self, num_classes, global_pool='avg'): - self.num_classes = num_classes - self.global_pool, self.last_linear = create_classifier( - self.num_features, self.num_classes, pool_type=global_pool) - - def forward_features(self, x): - x = self.layer0(x) - x = self.pool0(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - return x - - def logits(self, x): - x = self.global_pool(x) - if self.drop_rate > 0.: - x = F.dropout(x, p=self.drop_rate, training=self.training) - x = self.last_linear(x) - return x - - def forward(self, x): - x = self.forward_features(x) - x = self.logits(x) - return x - - -def _create_senet(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - SENet, variant, default_cfg=default_cfgs[variant], pretrained=pretrained, **kwargs) - - -@register_model -def legacy_seresnet18(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet18', pretrained, **model_args) - - -@register_model -def legacy_seresnet34(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) - return 
_create_senet('legacy_seresnet34', pretrained, **model_args) - - -@register_model -def legacy_seresnet50(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet50', pretrained, **model_args) - - -@register_model -def legacy_seresnet101(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet101', pretrained, **model_args) - - -@register_model -def legacy_seresnet152(pretrained=False, **kwargs): - model_args = dict( - block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) - return _create_senet('legacy_seresnet152', pretrained, **model_args) - - -@register_model -def legacy_senet154(pretrained=False, **kwargs): - model_args = dict( - block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, - downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) - return _create_senet('legacy_senet154', pretrained, **model_args) - - -@register_model -def legacy_seresnext26_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) - return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) - - -@register_model -def legacy_seresnext50_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) - return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) - - -@register_model -def legacy_seresnext101_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) - return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/spaces/zomehwh/bert_vits2/text/cleaner.py b/spaces/zomehwh/bert_vits2/text/cleaner.py deleted file mode 100644 index 3ba3739816aabbe16663b68c74fcda0588c14bab..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/bert_vits2/text/cleaner.py +++ /dev/null @@ -1,28 +0,0 @@ -from text import chinese, japanese, cleaned_text_to_sequence - - -language_module_map = {"ZH": chinese, "JP": japanese} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - - -if __name__ == "__main__": - pass diff --git a/spaces/zomehwh/bert_vits2/text/japanese_bert.py b/spaces/zomehwh/bert_vits2/text/japanese_bert.py deleted file mode 100644 index 308081af3b997c5ef61ce368009019a7b77ebe09..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/bert_vits2/text/japanese_bert.py +++ /dev/null @@ -1,87 +0,0 @@ -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM -import sys -import os -from text.japanese import text2sep_kata -tokenizer = 
AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3") - -models = dict() - - -def get_bert_feature(text, word2ph, device=None): - sep_text,_ = text2sep_kata(text) - sep_tokens = [tokenizer.tokenize(t) for t in sep_text] - sep_ids = [tokenizer.convert_tokens_to_ids(t) for t in sep_tokens] - sep_ids = [2]+[item for sublist in sep_ids for item in sublist]+[3] - return get_bert_feature_with_token(sep_ids, word2ph, device) - - -# def get_bert_feature(text, word2ph, device=None): -# if ( -# sys.platform == "darwin" -# and torch.backends.mps.is_available() -# and device == "cpu" -# ): -# device = "mps" -# if not device: -# device = "cuda" -# if device not in models.keys(): -# models[device] = AutoModelForMaskedLM.from_pretrained( -# "cl-tohoku/bert-base-japanese-v3" -# ).to(device) -# with torch.no_grad(): -# inputs = tokenizer(text, return_tensors="pt") -# for i in inputs: -# inputs[i] = inputs[i].to(device) -# res = models[device](**inputs, output_hidden_states=True) -# res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu() -# assert inputs["input_ids"].shape[-1] == len(word2ph) -# word2phone = word2ph -# phone_level_feature = [] -# for i in range(len(word2phone)): -# repeat_feature = res[i].repeat(word2phone[i], 1) -# phone_level_feature.append(repeat_feature) - -# phone_level_feature = torch.cat(phone_level_feature, dim=0) - -# return phone_level_feature.T - -def get_bert_feature_with_token(tokens, word2ph, device=None): - if ( - sys.platform == "darwin" - and torch.backends.mps.is_available() - and device == "cpu" - ): - device = "mps" - if not device: - device = "cuda" - if device not in models.keys(): - models[device] = AutoModelForMaskedLM.from_pretrained( - "./bert/bert-base-japanese-v3" - ).to(device) - with torch.no_grad(): - inputs = torch.tensor(tokens).to(device).unsqueeze(0) - token_type_ids = torch.zeros_like(inputs).to(device) - attention_mask = torch.ones_like(inputs).to(device) - inputs = {"input_ids": inputs, "token_type_ids": token_type_ids, "attention_mask": attention_mask} - - - # for i in inputs: - # inputs[i] = inputs[i].to(device) - res = models[device](**inputs, output_hidden_states=True) - res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu() - assert inputs["input_ids"].shape[-1] == len(word2ph) - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - return phone_level_feature.T - - -if __name__ == "__main__": - print(get_bert_feature("観覧車",[4,2])) - pass \ No newline at end of file diff --git a/spaces/zzz666/ChuanhuChatGPT/custom.css b/spaces/zzz666/ChuanhuChatGPT/custom.css deleted file mode 100644 index 5143eb138ea2469d8c457c71cb210fd3fb7cbe15..0000000000000000000000000000000000000000 --- a/spaces/zzz666/ChuanhuChatGPT/custom.css +++ /dev/null @@ -1,162 +0,0 @@ -:root { - --chatbot-color-light: #F3F3F3; - --chatbot-color-dark: #121111; -} - -/* status_display */ -#status_display { - display: flex; - min-height: 2.5em; - align-items: flex-end; - justify-content: flex-end; -} -#status_display p { - font-size: .85em; - font-family: monospace; - color: var(--body-text-color-subdued); -} - -#chuanhu_chatbot, #status_display { - transition: all 0.6s; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色 */ -#chuanhu_chatbot { - background-color: var(--chatbot-color-light) !important; -} -[data-testid = "bot"] { - 
background-color: #FFFFFF !important; -} -[data-testid = "user"] { - background-color: #95EC69 !important; -} -/* 对话气泡 */ -[class *= "message"] { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; -} -/* 表格 */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* 行内代码 */ -code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: hsla(0, 0%, 0%, 80%)!important; - border-radius: 10px; - padding: 1.4em 1.2em 0em 1.4em; - margin: 1.2em 2em 1.2em 0.5em; - color: #FFF; - box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); -} -/* 代码高亮样式 */ -.highlight .hll { background-color: #49483e } -.highlight .c { color: #75715e } /* Comment */ -.highlight .err { color: #960050; background-color: #1e0010 } /* Error */ -.highlight .k { color: #66d9ef } /* Keyword */ -.highlight .l { color: #ae81ff } /* Literal */ -.highlight .n { color: #f8f8f2 } /* Name */ -.highlight .o { color: #f92672 } /* Operator */ -.highlight .p { color: #f8f8f2 } /* Punctuation */ -.highlight .ch { color: #75715e } /* Comment.Hashbang */ -.highlight .cm { color: #75715e } /* Comment.Multiline */ -.highlight .cp { color: #75715e } /* Comment.Preproc */ -.highlight .cpf { color: #75715e } /* Comment.PreprocFile */ -.highlight .c1 { color: #75715e } /* Comment.Single */ -.highlight .cs { color: #75715e } /* Comment.Special */ -.highlight .gd { color: #f92672 } /* Generic.Deleted */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .gi { color: #a6e22e } /* Generic.Inserted */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #75715e } /* Generic.Subheading */ -.highlight .kc { color: #66d9ef } /* Keyword.Constant */ -.highlight .kd { color: #66d9ef } /* Keyword.Declaration */ -.highlight .kn { color: #f92672 } /* Keyword.Namespace */ -.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ -.highlight .kr { color: #66d9ef } /* Keyword.Reserved */ -.highlight .kt { color: #66d9ef } /* Keyword.Type */ -.highlight .ld { color: #e6db74 } /* Literal.Date */ -.highlight .m { color: #ae81ff } /* Literal.Number */ -.highlight .s { color: #e6db74 } /* Literal.String */ -.highlight .na { color: #a6e22e } /* Name.Attribute */ -.highlight .nb { color: #f8f8f2 } /* Name.Builtin */ -.highlight .nc { color: #a6e22e } /* Name.Class */ -.highlight .no { color: #66d9ef } /* Name.Constant */ -.highlight .nd { color: #a6e22e } /* Name.Decorator */ -.highlight .ni { color: #f8f8f2 } /* Name.Entity */ -.highlight .ne { color: #a6e22e } /* Name.Exception */ -.highlight .nf { color: #a6e22e } /* Name.Function */ -.highlight .nl { color: #f8f8f2 } /* Name.Label */ -.highlight .nn { color: #f8f8f2 } /* 
Name.Namespace */ -.highlight .nx { color: #a6e22e } /* Name.Other */ -.highlight .py { color: #f8f8f2 } /* Name.Property */ -.highlight .nt { color: #f92672 } /* Name.Tag */ -.highlight .nv { color: #f8f8f2 } /* Name.Variable */ -.highlight .ow { color: #f92672 } /* Operator.Word */ -.highlight .w { color: #f8f8f2 } /* Text.Whitespace */ -.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ -.highlight .mf { color: #ae81ff } /* Literal.Number.Float */ -.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ -.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ -.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ -.highlight .sa { color: #e6db74 } /* Literal.String.Affix */ -.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ -.highlight .sc { color: #e6db74 } /* Literal.String.Char */ -.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ -.highlight .sd { color: #e6db74 } /* Literal.String.Doc */ -.highlight .s2 { color: #e6db74 } /* Literal.String.Double */ -.highlight .se { color: #ae81ff } /* Literal.String.Escape */ -.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ -.highlight .si { color: #e6db74 } /* Literal.String.Interpol */ -.highlight .sx { color: #e6db74 } /* Literal.String.Other */ -.highlight .sr { color: #e6db74 } /* Literal.String.Regex */ -.highlight .s1 { color: #e6db74 } /* Literal.String.Single */ -.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ -.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #a6e22e } /* Name.Function.Magic */ -.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ -.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ -.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ -.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ -.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */