diff --git a/spaces/1368565466ki/Satdia/app.py b/spaces/1368565466ki/Satdia/app.py deleted file mode 100644 index 31cdc30680f88fe0a9a7e96575218eeeca606ad1..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/Satdia/app.py +++ /dev/null @@ -1,290 +0,0 @@ -# coding=utf-8 -import os -import re -import argparse -import utils -import commons -import json -import torch -import gradio as gr -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from torch import no_grad, LongTensor -import gradio.processing_utils as gr_processing_utils -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -hps_ms = utils.get_hparams_from_file(r'config/config.json') - -audio_postprocess_ori = gr.Audio.postprocess - -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) - - -gr.Audio.postprocess = audio_postprocess - -def get_text(text, hps, is_symbol): - text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def create_tts_fn(net_g_ms, speaker_id): - def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol): - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 100 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - if not is_symbol: - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - sid = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "Success", (22050, audio) - return tts_fn - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_lang): - if temp_lang == 0: - clean_text = f'[ZH]{input_text}[ZH]' - elif temp_lang == 1: - clean_text = f'[JA]{input_text}[JA]' - else: - clean_text = input_text - return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else '' - - return to_symbol_fn -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - elif language == 1: - return 0.6, 0.668, 1 - else: - return 0.6, 0.668, 1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio"); - let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - 
parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--all", action="store_true", default=False, help="enable all models") - args = parser.parse_args() - device = torch.device(args.device) - categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil"] - others = { - "Princess Connect! Re:Dive": "https://huggingface.co/spaces/sayashi/vits-models-pcr", - "Genshin Impact": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3", - "Honkai Impact 3rd": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3", - "Overwatch 2": "https://huggingface.co/spaces/sayashi/vits-models-ow2", - } - if args.all: - categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil", "Princess Connect! Re:Dive", "Genshin Impact", "Honkai Impact 3rd", "Overwatch 2"] - others = {} - models = [] - with open("pretrained_models/info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for i, info in models_info.items(): - if info['title'].split("-")[0] not in categories or not info['enable']: - continue - sid = info['sid'] - name_en = info['name_en'] - name_zh = info['name_zh'] - title = info['title'] - cover = f"pretrained_models/{i}/{info['cover']}" - example = info['example'] - language = info['language'] - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0, - **hps_ms.model) - utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None) - _ = net_g_ms.eval().to(device) - models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms))) - with gr.Blocks() as app: - gr.Markdown( - "#
vits-models\n" - "##
Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n" - "##
请不要生成会对个人以及组织造成侵害的内容\n\n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)\n\n" - "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/sayashi/vits-models?duplicate=true)\n\n" - "[![Finetune your own model](https://badgen.net/badge/icon/github?icon=github&label=Finetune%20your%20own%20model)](https://github.com/SayaSS/vits-finetuning)" - ) - - with gr.Tabs(): - for category in categories: - with gr.TabItem(category): - with gr.TabItem("EN"): - for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models: - if title.split("-")[0] != category: - continue - with gr.TabItem(name_en): - with gr.Row(): - gr.Markdown( - '
' - f'{title}' - f'' if cover else "" - '
' - ) - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}") - lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"], - type="index", value=language) - with gr.Accordion(label="Advanced Options", open=False): - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[input_text], - samples=[[x] for x in hps_ms.symbols]) - symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False) - btn = gr.Button(value="Generate", variant="primary") - with gr.Row(): - ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}") - download = gr.Button("Download Audio") - btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}")) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - symbol_input.change( - to_symbol_fn, - [symbol_input, input_text, lang], - [input_text] - ) - symbol_list.click(None, [symbol_list, symbol_list_json], [input_text], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return text_input.value; - }}""") - with gr.TabItem("中文"): - for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models: - if title.split("-")[0] != category: - continue - with gr.TabItem(name_zh): - with gr.Row(): - gr.Markdown( - '
' - f'{title}' - f'' if cover else "" - '
' - ) - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}") - lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文"if language == "Chinese" else "日语") - with gr.Accordion(label="高级选项", open=False): - symbol_input = gr.Checkbox(value=False, label="符号输入") - symbol_list = gr.Dataset(label="符号列表", components=[input_text], - samples=[[x] for x in hps_ms.symbols]) - symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False) - btn = gr.Button(value="生成", variant="primary") - with gr.Row(): - ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="输出信息") - o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}") - download = gr.Button("下载音频") - btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2]) - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}")) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - symbol_input.change( - to_symbol_fn, - [symbol_input, input_text, lang], - [input_text] - ) - symbol_list.click(None, [symbol_list, symbol_list_json], [input_text], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return text_input.value; - }}""") - for category, link in others.items(): - with gr.TabItem(category): - gr.Markdown( - f''' -
-

Click to Go

- - -
- ''' - ) - app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share) diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md deleted file mode 100644 index 024bf4dd598cd5080f08b669fbe7ff910083e6ff..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md +++ /dev/null @@ -1,153 +0,0 @@ -
-

GameGeniePs3USBrar: How to Use Game Genie Save Editor for PS3

-

Do you want to unlock all levels, get maximum money, ammo and experience, and have more fun with your PS3 games? If yes, then you need GameGeniePs3USBrar. In this article, I will show you what GameGeniePs3USBrar is, how to download and install it, how to use it to modify your PS3 saves, and what games and cheats are available with it. Let's get started!

-

What is GameGeniePs3USBrar?

-

GameGeniePs3USBrar is a file name that contains the setup program for Game Genie Save Editor for PS3. Game Genie Save Editor for PS3 is a software that allows you to access and edit your PS3 game saves on your PC with cheats that take effect once you load your game on your PS3. It is an easy-to-use program that works by copying your save from your PS3 to a USB drive, inserting it into your PC, choosing and applying cheats using Game Genie Save Editor for PS3, and copying your save back from the USB drive to your PS3.

-

GameGeniePs3USBrar


Download Filehttps://byltly.com/2uKxxF



-

A brief introduction to Game Genie Save Editor for PS3

-

Game Genie Save Editor for PS3 is a product developed by Hyperkin, a company that specializes in video game accessories and software. It was released in 2012 as a successor to the original Game Genie device that was popular in the 1990s. It works with European and American PS3 games, and does not require any illegal modifications or jailbreaking of your PS3. It is compatible with Windows XP, Vista, 7, 8, and 10.

-

The benefits of using Game Genie Save Editor for PS3

-

There are many benefits of using Game Genie Save Editor for PS3. Some of them are:

-
    -
  • You can enjoy more freedom and creativity with your games by modifying them according to your preferences.
  • -
  • You can save time and effort by skipping difficult or tedious parts of the games.
  • -
  • You can enhance your gaming experience by unlocking new features, items, modes, characters, etc.
  • -
  • You can discover new secrets and easter eggs that you might have missed otherwise.
  • -
  • You can have more fun with your games by trying out different combinations of cheats.
  • -
-

How to download and install GameGeniePs3USBrar?

-

To use Game Genie Save Editor for PS3, you need to download and install GameGeniePs3USBrar on your PC. Here's how:

-

The requirements for using Game Genie Save Editor for PS3

-

Before you download and install GameGeniePs3USBrar, make sure you have the following requirements:

-

-
    -
  • A PC with Windows XP, Vista, 7, 8, or 10.
  • -
  • A USB drive with at least 1 GB of free space.
  • -
  • A PS3 with a USB port.
  • -
  • A copy of Game Genie Save Editor for PS3. You can purchase it from www.thegamegenie.com or www.gamegenie.eu, depending on your region. You can also buy it as a physical product that comes with a USB drive or as a direct download version that you can download from the website after purchase.
  • -
-

The steps to download and install GameGeniePs3USBrar

-

Once you have the requirements ready, follow these steps to download and install GameGeniePs3USBrar:

-
    -
  1. Go to http://download.gamegenie.eu/ps3/ if you purchased the product from www.gamegenie.eu or go to http://www.thegamegenie.com/ps4/download.php if you purchased the product from www.thegamegenie.com.
  2. -
  3. Click on the link that says "Download Setup Here" under the appropriate section depending on whether you bought the physical product or the direct download version.
  4. -
  5. Save the file named "GameGeniePs3USBrar" (or the equivalent European version) on your PC.
  6. -
  7. Extract the file using a program like WinRAR or 7-Zip.
  8. -
  9. Run the setup program included in the extracted files.
  10. -
  11. Follow the instructions on the screen to complete the installation process.
  12. -
  13. Launch the program by clicking on its icon on your desktop or start menu.
  14. -
-

How to use GameGeniePs3USBrar to modify your PS3 saves?

-

Now that you have downloaded and installed GameGeniePs3USBrar on your PC, you can use it to modify your PS3 saves. Here's how:

-

The features of Game Genie Save Editor for PS3

-

Game Genie Save Editor for PS3 has several features that make it easy and convenient to use. Some of them are:

-
    -
  • You can browse through hundreds of games and thousands of cheats that are available in its database.
  • -
  • You can search for games by name or by genre.
  • -
  • You can sort games by popularity or alphabetically.
  • -
  • You can view detailed information about each game and cheat, such as description, screenshots, video tutorials, etc.
  • -
  • You can customize each cheat by changing its value or enabling/disabling it.
  • -
  • You can create multiple profiles for different users or games.
  • -
  • You can backup and restore your saves in case something goes wrong.
  • -
  • You can update the program and its database automatically or manually.
  • -
-

The process of modifying your PS3 saves with Game Genie Save Editor for PS3

-

To modify your PS3 saves with Game Genie Save Editor for PS3, you need to follow three main steps: copying your save from your PS3 to a USB drive, choosing and applying cheats with Game Genie Save Editor for PS3 on your PC, and copying your save back from the USB drive to your PS3 and loading your game. Here's how:

-
How to copy your save from your PS3 to a USB drive
-
    -
  1. Turn on your PC and insert your USB drive into an available port.
  2. -
  3. Create a folder named "PS3" on the root directory of your USB drive (see the short Python sketch after this list).
  4. -
  5. Create another folder named "SAVEDATA" inside the "PS3" folder.
  6. -
  7. Create another folder named "BLESXXXXX" inside the "SAVEDATA" folder. Replace XXXXX with the five-digit code that corresponds to the region of your game. For example, if you have a European version of The Elder Scrolls V: Skyrim, the code would be BLES01329.
  8. -
  9. Copy your save file from your PS3 to the "BLESXXXXX" folder on your USB drive. To do this, go to the Game menu on your PS3, select Saved Data Utility (PS3), find the game you want to copy, press the Triangle button, and choose Copy. Select your USB device as the destination and confirm.
  10. -
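
If you prefer to set the folders up programmatically, here is a minimal sketch of the same layout in Python. The drive letter and the BLES code are assumptions for illustration only; substitute your own USB mount point and your game's region code.

```python
# Minimal sketch: create the folder layout described above on the USB drive.
# "E:/" and "BLES01329" are assumptions -- replace them with your USB drive's
# mount point and your game's own region code.
import os

usb_root = "E:/"          # assumed mount point of the USB drive
game_code = "BLES01329"   # example code from the article (European Skyrim)

save_dir = os.path.join(usb_root, "PS3", "SAVEDATA", game_code)
os.makedirs(save_dir, exist_ok=True)  # creates PS3/SAVEDATA/BLES01329 if missing
print("Copy your PS3 save folder into:", save_dir)
```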
-
How to choose and apply cheats using Game Genie Save Editor for PS3
-
    -
  1. Insert your USB drive into your PC and launch Game Genie Save Editor for PS3.
  2. -
  3. Select the profile you want to use or create a new one by clicking on the Profile button.
  4. -
  5. Click on the Open button and browse to your USB drive. Select the save file you want to modify and click Open.
  6. -
  7. Wait for the program to load the game information and the available cheats. You can also click on the Refresh button to update the cheats database.
  8. -
  9. Browse through the cheats by clicking on the arrows or using the search box. You can also sort them by name or category.
  10. -
  11. Check the box next to each cheat you want to apply. You can also change the value of some cheats by clicking on them and typing a new number.
  12. -
  13. Click on the Apply button to confirm your changes. You can also click on the Backup button to save a copy of your original save file.
  14. -
-
How to copy your save back from the USB drive to your PS3 and load your game
-
    -
  1. Eject your USB drive from your PC and insert it into your PS3.
  2. -
  3. Go to the Game menu on your PS3, select Saved Data Utility (PS3), find your USB device, press the Triangle button, and choose Copy.
  4. -
  5. Select the save file you want to copy and confirm. If you have a backup of your original save file, you can choose to overwrite it or keep both versions.
  6. -
  7. Load your game and enjoy your modified save!
  8. -
-

What games and cheats are available with GameGeniePs3USBrar?

-

GameGeniePs3USBrar gives you access to hundreds of games and thousands of cheats that are available in its database. You can find games from various genres, such as action, adventure, role-playing, sports, racing, fighting, etc. You can also find cheats for different aspects of the games, such as health, money, ammo, items, stats, skills, levels, etc.

-

The list of games and cheats included in Game Genie Save Editor for PS3

-

To see the list of games and cheats included in Game Genie Save Editor for PS3, you can go to www.gamegenie.eu or www.thegamegenie.com, depending on your region. You can also view them in the program by clicking on the List button. The list is updated regularly with new games and cheats added every week. As of November 2016, there are 471 games and 23257 cheats in total.

-

The updates and support for Game Genie Save Editor for PS3

-

Game Genie Save Editor for PS3 is constantly updated with new games and cheats added every week. You can update the program and its database automatically or manually by clicking on the Update button. You can also check for updates by going to Help > Check for Updates. If you have any questions or problems with Game Genie Save Editor for PS3, you can contact the support team by going to Help > Contact Support or by sending an email to support@thegamegenie.com or support@gamegenie.eu.

-

Conclusion

-

In conclusion, GameGeniePs3USBrar is a file name that contains the setup program for Game Genie Save Editor for PS3. Game Genie Save Editor for PS3 is a software that allows you to access and edit your PS3 game saves on your PC with cheats that take effect once you load your game on your PS3. It is an easy-to-use program that works by copying your save from your PS3 to a USB drive, inserting it into your PC, choosing and applying cheats using Game Genie Save Editor for PS3, and copying your save back from the USB drive to your PS3. It gives you access to hundreds of games and thousands of cheats that are available in its database. It is compatible with European and American PS3 games, and does not require any illegal modifications or jailbreaking of your PS3. It is a fun and convenient way to enhance your gaming experience with more freedom and creativity.

-

I hope this article has helped you understand what GameGeniePs3USBrar is, how to download and install it, how to use it to modify your PS3 saves, and what games and cheats are available with it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

-

Frequently Asked Questions

-

Here are some frequently asked questions about GameGeniePs3USBrar:

-
    -
  1. Is Game Genie Save Editor for PS3 legal?
    -Yes, Game Genie Save Editor for PS3 is legal as long as you use it for personal use only. It does not modify or hack your PS3 system or firmware. It only modifies your own game saves that are stored on a USB drive.
  2. -
  3. Does Game Genie Save Editor for PS3 work with all PS3 games?
    -No, Game Genie Save Editor for PS3 does not work with all PS3 games. It only works with games that are supported by its database. You can check if a game is supported by going to www.gamegenie.eu or www.thegamegenie.com, depending on your region.
  4. -
  5. Can I use Game Genie Save Editor for PS3 online?
    -No, you cannot use Game Genie Save Editor for PS3 online. It is intended for offline use only. Using it online may result in banning or suspension from online services or multiplayer modes.
  6. -
  7. Can I share my modified saves with other users?
    -No, you cannot share your modified saves with other users. Each save file is encrypted with a unique code that is tied to your profile and console. Sharing it may cause corruption or errors.
  8. -
  9. Can I undo the changes made by Game Genie Save Editor for PS3?
    -Yes, you can undo the changes made by Game Genie Save Editor for PS3 by restoring your original save file. To do this, you need to have a backup of your original save file that you created before applying any cheats. You can restore it by copying it back from the USB drive to your PS3 using the same method as before.
  10. -
-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md b/spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md deleted file mode 100644 index 0b6048d1166c727dcdab230f754034371080f047..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md +++ /dev/null @@ -1,44 +0,0 @@ -

autodata 3.40 crack windows 7


Download Filehttps://imgfil.com/2uxZvq



- -Details: The most important version of AutoData for the Mac, AutoData 3.45, is a must have backup solution for Mac users. You don't need a clean installation of the OSX or a backup server to run it on your Mac. - -Learn how to download and install AutoData 3.45 free on your Mac with step by step guides in this tutorial. This tutorial will help you in: Open your Mac Install AutoData Download and use AutoData 3.45 The tutorial will guide you through each step and will teach you how to download and install AutoData 3.45 free on your Mac. - -1. How to download AutoData 3.45 free: - -Step 1. Click on the Download button to download the AutoData.dmg file. - -Step 2. Save it to your desktop by choosing “Save As” from the file browser. - -Step 3. Double click on the AutoData.dmg file to install AutoData 3.45. - -Note: The AutoData 3.45 Free Download may ask you to activate by entering the serial number, but you don't need to enter the serial number. - -Step 4. Choose “Upgrade from existing installation” if the version of the application you are currently running is not the same as the version you downloaded. - -Step 5. You are now ready to use AutoData 3.45 free. - -2. How to use AutoData 3.45 on your Mac: - -Step 1. Launch the AutoData application from the desktop. - -Step 2. Press “Backup”, “Restore”, “Make a backup of my data”, “Create a new backup”, “Delete” or “Revert”. - -Step 3. Press “Backup” to backup your applications. - -Step 4. Press “Restore” to restore your applications. - -Step 5. You can also use the Backup Manager to backup your applications. - -3. How to upgrade AutoData 3.45 Free? - -Step 2. You will see the following window: - -Note: If you don't see this window, then, download AutoData 3.45 and update it manually. - -Step 3. Choose “Upgrade from existing installation”. - -Step 4. You 4fefd39f24
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md b/spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md deleted file mode 100644 index a902dc18abdfd1c46bcc5629fca10766d968acf5..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md +++ /dev/null @@ -1,6 +0,0 @@ -

Condenados a fugarse audio latino


Download Zip > https://imgfil.com/2uy1sM



- -Condenados a Fugarse Watch Online. 1 Latin Spanish Netu; 2 Latin Spanish Fembed; 3 Latin Spanish MegaVIPS; 4 Latin Spanish Mystream; 5 Latin Spanish ...
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md deleted file mode 100644 index e9fffb5bbde76b98df02c722b1984aec6c0af764..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md +++ /dev/null @@ -1,61 +0,0 @@ -
-

How to Download GTA 5 on Android and iOS Devices

-

GTA 5 is one of the most popular and acclaimed video games of all time. It is an action-adventure game that lets you experience the life of a criminal in the fictional city of Los Santos. You can play as one of three protagonists, each with their own story, personality, and skills. You can also switch between them at any time, creating a dynamic and immersive gameplay.

-

If you are a fan of GTA 5, you might be wondering if you can play it on your mobile devices, such as Android or iOS phones and tablets. The answer is yes, you can! In this article, we will show you how to download GTA 5 on Android and iOS devices, as well as some tips and tricks for playing it on the go.

-

apk gta 5 download official gta 5 for android & ios


Downloadhttps://urlin.us/2uT0sS



-

What is GTA 5 and Why You Should Play It

-

GTA 5 is the fifth main installment in the Grand Theft Auto series, developed by Rockstar Games. It was released in 2013 for PlayStation 3 and Xbox 360, and later for PlayStation 4, Xbox One, and PC. It is considered one of the best-selling and most critically acclaimed games of all time, with over 140 million copies sold worldwide.

-

GTA 5 Features and Gameplay

-

GTA 5 offers a vast and diverse open world that you can explore by foot, by car, by bike, by boat, by plane, or by helicopter. You can also interact with various characters, objects, and activities in the world, such as robbing stores, playing golf, racing cars, parachuting, hunting animals, or watching TV.

-

The game also features a compelling story mode that follows the lives of three protagonists: Michael, a retired bank robber who is unhappy with his family; Franklin, a young street hustler who wants to make it big; and Trevor, a psychotic drug dealer who lives in a trailer park. You can switch between them at any time during the game, creating different perspectives and outcomes.

-

Additionally, the game has an online mode called GTA Online, where you can create your own character and join other players in various missions, races, heists, deathmatches, or freemode events. You can also customize your character's appearance, skills, vehicles, weapons, properties, and businesses.

-

GTA 5 Requirements and Compatibility

-

To play GTA 5 on your mobile devices, you will need to have a compatible device that meets the minimum requirements. According to Rockstar Games, these are:

- - - - -
| Device  | OS             | RAM            | Storage        |
|---------|----------------|----------------|----------------|
| Android | 8.0 or higher  | 4 GB or higher | 8 GB or higher |
| iOS     | 12.0 or higher | 4 GB or higher | 8 GB or higher |
-

You will also need to have a stable internet connection to download and play the game.

-

How to Download GTA 5 on Android Devices

-

There are three main ways to download GTA 5 on your Android devices:

-

-

Download from the Official Rockstar Games Website

-

The easiest way to download GTA 5 on your Android devices is to visit the official Rockstar Games website and follow the instructions. You will need to create a Rockstar Games Social Club account or log in with your existing one. Then, you will need to purchase the game for $19.99 and download the GTA 5 apk file on your device. You will also need to download the GTA 5 data file, which is about 3 GB in size. After that, you can install the apk file and launch the game.

-

Download from the Epic Games Store

-

Another way to download GTA 5 on your Android devices is to use the Epic Games Store app, which is available on the Google Play Store. You will need to create an Epic Games account or log in with your existing one. Then, you will need to purchase the game for $19.99 and download it on your device. You will also need to download the GTA 5 data file, which is about 3 GB in size. After that, you can launch the game from the app.

-

Download from the BlueStacks App Player

-

The third way to download GTA 5 on your Android devices is to use the BlueStacks App Player, which is a software that allows you to run Android apps on your PC. You will need to download and install the BlueStacks App Player on your PC from its official website. Then, you will need to download the GTA 5 apk file and data file from the Rockstar Games website or the Epic Games Store app. After that, you can transfer the files to your device using a USB cable or a cloud service. Then, you can install the apk file and launch the game.

-

How to Download GTA 5 on iOS Devices

-

There are two main ways to download GTA 5 on your iOS devices:

-

Download from the App Store

-

The easiest way to download GTA 5 on your iOS devices is to visit the App Store and search for GTA 5. You will need to have an Apple ID or create one if you don't have one. Then, you will need to purchase the game for $19.99 and download it on your device. You will also need to download the GTA 5 data file, which is about 3 GB in size. After that, you can launch the game from your home screen.

-

Download from the Cloud Gaming Services

-

Another way to download GTA 5 on your iOS devices is to use a cloud gaming service, such as Google Stadia, NVIDIA GeForce Now, or Microsoft xCloud. These are platforms that allow you to stream games from the cloud to your device without downloading them. You will need to have a subscription or a membership for these services, which vary in price and features. Then, you will need to have a compatible device and a stable internet connection. After that, you can access GTA 5 from the service's app or website and play it on your device.

-

Tips and Tricks for Playing GTA 5 on Mobile Devices

-

Playing GTA 5 on mobile devices can be challenging and fun at the same time. Here are some tips and tricks for playing it on the go:

-

Adjust the Settings and Controls

-

GTA 5 has a lot of settings and controls that you can customize according to your preference and device's performance. You can adjust the graphics quality, sound volume, camera angle, brightness, subtitles, and more. You can also change the control layout, sensitivity, vibration, and feedback. You can find these options in the pause menu under Settings.

-

Use the Online Mode and Social Club Features

-

GTA 5 has an online mode called GTA Online, where you can join other players in various missions, races, heists, deathmatches, or freemode events. You can also customize your character's appearance, skills, vehicles, weapons, properties, and businesses. To access GTA Online, you will need to have a Rockstar Games Social Club account and an internet connection. You can find this option in the pause menu under Online.

-

The Social Club also offers other features that enhance your gaming experience, such as leaderboards, stats, achievements, crews, friends, messages, screenshots, videos, and more. You can access these features from the pause menu under Social Club or from the Rockstar Games website or app.

-

Explore the Open World and Complete the Missions

-

GTA 5 has a vast and diverse open world that you can explore by foot, by car, by bike, by boat, by plane, or by helicopter. You can also interact with various characters, objects, and activities in the world, such as robbing stores, playing golf, racing cars, parachuting, hunting animals, or watching TV.

-

The game also has a compelling story mode that follows the lives of three protagonists: Michael, a retired bank robber who is unhappy with his family; Franklin, a young street hustler who wants to make it big; and Trevor, a psychotic drug dealer who lives in a trailer park. You can switch between them at any time during the game, creating different perspectives and outcomes.

-

To progress in the story mode, you will need to complete various missions that involve driving, shooting, stealth, planning, and teamwork. You can also choose how to approach each mission, such as being loud or quiet, aggressive or passive, or using different vehicles or weapons. You can find these missions on the map or by contacting the characters.

-

Conclusion

-

GTA 5 is an amazing game that you can enjoy on your mobile devices. You can download it from the official Rockstar Games website, the Epic Games Store app, the BlueStacks App Player, the App Store, or the cloud gaming services. You can also customize the settings and controls, use the online mode and social club features, and explore the open world and complete the missions. GTA 5 is a game that will keep you entertained for hours and hours.

-

FAQs

-

Here are some frequently asked questions about GTA 5 on mobile devices:

-

Q: How much space does GTA 5 take on my device?

-

A: GTA 5 takes about 8 GB of space on your device, plus another 3 GB for the data file. You will need to have enough free space on your device before downloading and installing the game.

-

Q: Can I play GTA 5 offline on my device?

-

A: Yes, you can play GTA 5 offline on your device. However, you will need to have an internet connection to download and install the game, as well as to access some features such as GTA Online and Social Club.

-

Q: Can I play GTA 5 with my friends on my device?

-

A: Yes, you can play GTA 5 with your friends on your device. You can join them in GTA Online or invite them to your game session. You will need to have a Rockstar Games Social Club account and an internet connection to do so.

-

Q: Can I transfer my GTA 5 progress from my PC or console to my device?

-

A: Yes, you can transfer your GTA 5 progress from your PC or console to your device. You will need to have a Rockstar Games Social Club account and link it to your PC or console account. Then, you will need to log in with the same account on your device and choose to sync your progress.

-

Q: Can I use cheats or mods on GTA 5 on my device?

-

A: No, you cannot use cheats or mods on GTA 5 on your device. Cheats and mods are not supported by Rockstar Games and may cause errors or bans on your account. You should only play GTA 5 on your device as intended by the developers.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/ .md b/spaces/1phancelerku/anime-remove-background/ .md deleted file mode 100644 index 2165ff6319da142d979cd0413f348f93f58ffaa0..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/ .md +++ /dev/null @@ -1,130 +0,0 @@ -
-

Download Scanword Fun: how to have fun and raise your level of knowledge

-

Do you love solving scanwords but don't want to spend money on printed puzzle books or install lots of different apps? Then you will like the game Scanword Fun, which offers you an endless number of free scanwords for every taste and difficulty level. In this article we will explain what Scanword Fun is, how to download it and how to solve scanwords in it, as well as what benefits it brings for your development and relaxation.

-

download scanword fun


DOWNLOAD ->>->>->> https://jinyurl.com/2uNUuh



-

What is Scanword Fun and why is it so popular?

-

Scanword Fun is an app for Android and iOS that lets you solve scanwords on your phone or tablet. A scanword is a type of crossword in which the clues are placed inside the grid and the answers are written horizontally or vertically. Scanwords can cover different topics, for example history, geography, culture, sport, science and so on.

-

Scanword Fun is not just a collection of scanwords but a full game with many features and options for its users. Let's look at them in more detail.

-

Features of the Scanword Fun game

-

Free scanwords every day

-

In Scanword Fun you are not limited in the number of scanwords you can solve. New scanwords of varying difficulty and topics become available every day. You can pick the ones that interest you and skip the ones you don't like. You can also return to skipped or unfinished scanwords at any time.

-


-

Three themes to choose from

-

In Scanword Fun you can choose the theme you want to solve scanwords on. There are three themes to choose from: general, sport and cinema. In the general theme you will find scanwords from different areas of knowledge, in the sport theme from different kinds of sport and famous athletes, and in the cinema theme from different films and actors. You can change the theme at any moment or solve scanwords across all three themes.

-

A large number of settings

-

In Scanword Fun you can adjust the game to your preferences and convenience. You can change the font size, background colour, interface language, sound and music. You can also turn automatic letter filling, error highlighting, hints and statistics on or off. You can save your progress in the game and synchronise it with other devices via the cloud.

-

Bonuses for solving a group of scanwords

-

In Scanword Fun you not only enjoy solving scanwords but also earn bonuses for your progress. For every solved scanword you get coins that can be spent on hints or mini-games. And if you solve a group of five scanwords of the same theme, you get an extra bonus: a gold coin that gives you access to a special scanword with a larger coin reward for solving it.

-

Mini-games for variety

-

In Scanword Fun you can not only solve scanwords but also play various mini-games that help you stretch your brain and take a break from scanwords. There are four mini-games to choose from: anagram, words from a word, sudoku and the 15-puzzle. In the anagram you have to make a word from the given letters; in words from a word you have to find all possible words inside one long word; in sudoku you have to fill the grid with digits so that they do not repeat in any row, column or square; and in the 15-puzzle you have to slide the tiles until they run in order from 1 to 15.

For each mini-game you also earn coins that can be used in Scanword Fun. The mini-games are available at any time and do not depend on the scanword theme.

-

How to download Scanword Fun to your phone or tablet?

-

Downloading Scanword Fun to your device is very simple. Depending on which operating system you have, you can do it in different ways.

-

For Android devices

-

If you have an Android phone or tablet, you need to do the following:

-
    -
  1. Open the Google Play app on your device.
  2. -
  3. In the search bar, type "сканворд фан" or "scanword fun".
  4. -
  5. Find the Scanword Fun game among the search results and tap it.
  6. -
  7. Tap the "Install" button and wait for the game to finish downloading and installing.
  8. -
  9. Tap the "Open" button or find the game icon on your home screen and launch it.
  10. -
-

Congratulations, you have successfully downloaded and installed Scanword Fun on your Android device!

-

For iOS devices

-

If you have an iPhone or iPad, you need to do the following:

-
    -
  1. Open the App Store app on your device.
  2. -
  3. In the search bar, type "сканворд фан" or "scanword fun".
  4. -
  5. Find the Scanword Fun game among the search results and tap it.
  6. -
  7. Tap the "Get" button and wait for the game to finish downloading and installing.
  8. -
  9. Tap the "Open" button or find the game icon on your home screen and launch it.
  10. -
-

Congratulations, you have successfully downloaded and installed Scanword Fun on your iOS device!

How to solve scanwords in Scanword Fun?

-

Solving scanwords in Scanword Fun is very simple and engaging. You only need to follow a few steps:

-

Choose a difficulty level

-

In Scanword Fun you can pick the difficulty level that suits you. There are three levels to choose from: easy, medium and hard. The easy level suits beginners or anyone who just wants to relax. The medium level suits those who want to think a little and test their knowledge. The hard level suits those who love difficult tasks and want to challenge themselves. You can change the difficulty level at any moment or solve scanwords of different levels.

-

Enter letters into the cells

-

In Scanword Fun you can enter letters into the grid cells with the keyboard or your finger. You can switch between horizontal and vertical input direction with the button in the lower-right corner of the screen. You can also move around the grid with the arrows or with swipes. If you enter the correct letter it stays in the cell; if not, it disappears.

-

Use the hints

-

In Scanword Fun you can use hints if you get stuck on a clue or a word. There are three types of hints to choose from: reveal a letter, reveal a word or reveal the scanword. Reveal a letter opens one letter in any word. Reveal a word opens a whole word horizontally or vertically. Reveal the scanword opens every word in the scanword. Hints are paid for with the coins you earn by solving scanwords or mini-games.

-

What benefits does Scanword Fun give you?

-

Scanword Fun not only entertains you but also brings plenty of benefits for your development and relaxation. Let's look at some of them.

-

Develops erudition and quick thinking

-

Playing Scanword Fun trains your erudition and speed of thought. You learn many new and interesting things on different topics, test your knowledge and memory, and come up with words from letters and definitions. This helps you expand your vocabulary, improve your spelling and grammar, and strengthen your concentration and logic.

-

Broadens your horizons and logical thinking

-

Playing Scanword Fun broadens your horizons and logical thinking. You get acquainted with various facts and events from history, geography, culture, sport, science and so on. You also learn to analyse and compare different information, draw conclusions and form hypotheses, and find connections and patterns. This helps you become more educated and sharper, and improves your problem-solving and decision-making skills.

-

Helps you relax and unwind

-

Playing Scanword Fun helps you relax and unwind. You can play Scanword Fun at any time and in any place when you need to relieve stress or pass the time. You can enjoy the game's attractive design, pleasant music and sounds, and interesting mini-games. You can also take satisfaction in your achievements, bonuses and rewards. Scanword Fun is a great way to have fun and lift your mood.

-

Conclusion

-

Scanword Fun is a unique app for scanword lovers and not only for them. It offers you an endless number of free scanwords of varying difficulty and topics, as well as many features and options for your development and relaxation. You can download Scanword Fun to your phone or tablet using the links below and start playing right now. Don't miss the chance to have fun and raise your level of knowledge with Scanword Fun!

-

Download Scanword Fun for Android

-

Download Scanword Fun for iOS

-

FAQ

-

In this section we answer some frequently asked questions about Scanword Fun.

-

Can you play Scanword Fun without the internet?

-

Yes, you can. Scanword Fun does not require a constant internet connection. You can play it in offline mode if you have already downloaded the scanwords or mini-games you need. However, to synchronise your progress, receive new scanwords or access special offers you need to connect to the internet.

-

How to get more coins in Scanword Fun?

-

There are several ways to get more coins in Scanword Fun. First, you can earn coins by solving scanwords or mini-games. Second, you can receive bonuses for solving a group of scanwords of the same theme or for solving a special scanword. Third, you can watch ads or take part in promotions to get extra coins. Fourth, you can buy coins for real money if you don't have enough for hints or mini-games.

-

How to reset your progress in Scanword Fun?

-

If you want to reset your progress in Scanword Fun and start the game from scratch, you can do it in the game settings. To do so, you need to do the following:

-
    -
  1. Open the game menu by tapping the three bars in the upper-left corner of the screen.
  2. -
  3. Choose the "Settings" item.
  4. -
  5. Scroll down to the "Reset progress" item.
  6. -
  7. Tap the "Reset" button and confirm your action.
  8. -
-

Note that resetting your progress will delete all your solved scanwords, coins, bonuses and settings. You will not be able to restore them. So reset your progress only if you are sure about your decision.

-

How to contact the developers of Scanword Fun?

-

If you have any questions, suggestions or problems with Scanword Fun, you can contact the game's developers by email or through social media. Here are their contacts:

-
    -
  • Email: scanwordfun@gmail.com
  • -
  • Facebook: https://www.facebook.com/scanwordfun
  • -
  • VK: https://vk.com/scanwordfun
  • -
  • Instagram: https://www.instagram.com/scanwordfun
  • -
-

The developers of Scanword Fun will be glad to hear your feedback and help you if needed.

-

How to leave a review of Scanword Fun?

-

If you like Scanword Fun and want to share your impressions with other users, you can leave a review of the game on Google Play or the App Store. To do so, you need to do the following:

-
    -
  1. Open the Google Play or App Store app on your device.
  2. -
  3. Find the Scanword Fun game among your installed apps and tap it.
  4. -
  5. Scroll down to the "Ratings and reviews" section.
  6. -
  7. Tap the "Write a review" or "Rate" button.
  8. -
  9. Choose the number of stars you want to give the game and write your review in the text field.
  10. -
  11. Tap the "Submit" or "Done" button.
  12. -
  13. -
-

Your review will be published on Google Play or the App Store and will be visible to other users. The developers of Scanword Fun will also be glad to read your review and take your feedback into account.

-

Thank you for choosing Scanword Fun! We hope you enjoyed our article and learned a lot of useful information. If you have any more questions, write to us at scanwordfun@gmail.com or on social media. We wish you a pleasant game and good luck solving scanwords!

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md b/spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md deleted file mode 100644 index 629a34ba6d8790be3ba87c55dd772174947d447d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md +++ /dev/null @@ -1,94 +0,0 @@ - -

National Treasure 2 Full Movie in Hindi Download 480p Filmyzilla

-

If you are a fan of action-adventure movies with historical mysteries and puzzles, you might have enjoyed watching National Treasure (2004) and its sequel National Treasure: Book of Secrets (2007). The movies star Nicolas Cage as Benjamin Franklin Gates, a treasure hunter who follows clues hidden in historical artifacts and documents to uncover secrets and conspiracies. In this article, we will review the second movie in the franchise, National Treasure: Book of Secrets, and tell you how you can download it in Hindi for free from Filmyzilla, a popular website that offers pirated movies.

-

Movie Review

-

National Treasure: Book of Secrets is a 2007 American action-adventure film directed by Jon Turteltaub and produced by Jerry Bruckheimer. It is a sequel to the 2004 film National Treasure and is the second film of the National Treasure franchise. The film stars Nicolas Cage in the lead role, Jon Voight, Harvey Keitel, Ed Harris, Diane Kruger, Justin Bartha, Bruce Greenwood and Helen Mirren.

-

national treasure 2 full movie in hindi download 480p filmyzilla


Download File ••• https://jinyurl.com/2uNJTn



-

The plot follows Ben Gates as he tries to prove the innocence of his great-great-grandfather Thomas Gates, who is accused of being involved in the assassination of Abraham Lincoln by a rival treasure hunter Mitch Wilkinson (Ed Harris). To do so, he has to find a lost city of gold that is linked to a secret book that contains the history of the United States. Along the way, he has to deal with his estranged parents (Jon Voight and Helen Mirren), his ex-girlfriend (Diane Kruger), his best friend (Justin Bartha), and an FBI agent (Harvey Keitel).

-

The movie is a fast-paced and fun-filled adventure that takes the viewers to various locations such as Paris, London, Washington D.C., Mount Rushmore, and the Black Hills. The movie has some impressive action sequences, such as a car chase in London, a kidnapping at Buckingham Palace, a break-in at the White House, and a cave exploration at Mount Rushmore. The movie also has some humorous moments, such as Ben's awkward interactions with his parents and his ex-girlfriend, Riley's sarcastic comments, and Ben's encounter with the President of the United States (Bruce Greenwood).

-

However, the movie also has some flaws that might affect its appeal to some viewers. The movie is very similar to its predecessor in terms of its plot structure, characters, and themes. The movie relies heavily on historical inaccuracies, coincidences, and conveniences to move the story forward. The movie also has some logical inconsistencies and plot holes that might raise some questions among the viewers. For example, how did Mitch Wilkinson get access to John Wilkes Booth's diary? How did Ben manage to sneak into Buckingham Palace and the White House? How did Ben know where to find the entrance to the lost city of gold?

-

national treasure book of secrets hindi dubbed 480p download
-national treasure 2 full movie in hindi watch online filmyzilla
-national treasure 2 hindi 480p free download
-national treasure book of secrets full movie download in hindi 480p
-national treasure 2 dual audio 480p filmyzilla
-national treasure book of secrets hindi 480p filmywap
-national treasure 2 full movie in hindi download 720p filmyzilla
-national treasure book of secrets full movie in hindi watch online
-national treasure 2 hindi dubbed movie download filmyzilla
-national treasure book of secrets dual audio 480p download
-national treasure 2 full movie in hindi download 300mb filmyzilla
-national treasure book of secrets full movie online free in hindi
-national treasure 2 hindi dubbed 480p download filmywap
-national treasure book of secrets full movie download in hindi hd
-national treasure 2 full movie in hindi download mp4 filmyzilla
-national treasure book of secrets full movie in hindi free download
-national treasure 2 full movie in hindi online filmyzilla
-national treasure book of secrets full movie in hindi 480p worldfree4u
-national treasure 2 full movie download in hindi filmyzilla
-national treasure book of secrets full movie in hindi dubbed download
-national treasure 2 full movie in hindi hd filmyzilla
-national treasure book of secrets full movie in hindi filmyzilla
-national treasure 2 full movie in hindi free download filmyzilla
-national treasure book of secrets full movie in hindi hd online
-national treasure 2 full movie in hindi watch online free filmyzilla
-national treasure book of secrets full movie in hindi dailymotion
-national treasure 2 full movie in hindi download filmywap
-national treasure book of secrets full movie in hindi youtube
-national treasure 2 full movie in hindi dubbed watch online filmyzilla
-national treasure book of secrets full movie download filmyzilla
-national treasure 2 full movie in hindi download hd filmyzilla
-national treasure book of secrets full movie watch online free hd
-national treasure 2 full movie in hindi download mkv filmyzilla
-national treasure book of secrets full movie online with english subtitles
-national treasure 2 full movie in hindi download khatrimaza filmyzilla
-national treasure book of secrets full movie free download mp4
-national treasure 2 full movie in hindi download pagalworld filmyzilla
-national treasure book of secrets full movie online free no sign up
-national treasure 2 full movie in hindi download moviesflix filmyzilla
-national treasure book of secrets full movie online free putlockers
-national treasure 2 full movie in hindi download coolmoviez filmyzilla
-national treasure book of secrets full movie online free dailymotion
-national treasure 2 full movie in hindi download worldfree4u filmyzilla
-national treasure book of secrets full movie online free youtube
-national treasure 2 full movie in hindi download bolly4u filmyzilla
-national treasure book of secrets full movie online free reddit
-national treasure 2 full movie in hindi download skymovieshd filmyzilla

-

The movie is not meant to be taken seriously or realistically. It is meant to be an entertaining and escapist fantasy that appeals to the fans of history, mystery, and adventure. The movie does not have any deep or profound messages or themes. It is simply a popcorn flick that delivers what it promises: action, humor, romance, and treasure.

-

Movie Trivia

-

Here are some interesting facts and behind-the-scenes stories about National Treasure: Book of Secrets that you might not know:

-
    -
  • The movie was originally titled National Treasure 2: The Book of Secrets.
  • -
  • The movie was filmed in various locations such as France, England, South Dakota, Maryland, Virginia, Washington D.C., California, and New York City.
  • -
  • The movie features several historical figures and events such as Abraham Lincoln, John Wilkes Booth, Mary Surratt, Samuel Mudd, Edwin Stanton, the Civil War, the Knights of the Golden Circle, the Resolute desks, the Statue of Liberty, the Mount Vernon Ladies' Association, and Cibola.
  • -
  • The movie also references several fictional works such as The Da Vinci Code, The Wizard of Oz, and The Adventures of Tom Sawyer.
  • -
  • The movie features several real-life artifacts and documents such as the Booth diary, the missing pages of the diary, the cipher wheel, the playbill, the twin Resolute desks, the Book of Secrets, and the President's seal.
  • -
  • The movie also features several fictional artifacts and documents such as the plank, the pipe, the scale model of Paris, the letter from Queen Victoria, the clue on the Statue of Liberty, and the map on the Resolute desk.
  • -
  • The movie had a budget of $130 million and grossed $457 million worldwide, making it a commercial success. It received mixed reviews from critics and audiences, with some praising its entertainment value and others criticizing its historical inaccuracies and implausibilities.
  • -
-

Filmyzilla Website

-

If you want to watch National Treasure: Book of Secrets in Hindi for free, you might be tempted to visit Filmyzilla, a popular website that offers free downloads of movies in various languages and formats. Filmyzilla is one of the many websites that provide pirated content to users who want to avoid paying for subscriptions or tickets. Filmyzilla has a large collection of movies from Hollywood, Bollywood, Tollywood, and other industries. You can find movies in genres such as action, comedy, drama, horror, thriller, romance, sci-fi, fantasy, animation, and more. You can also find movies in different resolutions such as 480p, 720p, 1080p, and 4K. You can download movies in formats such as MP4, MKV, AVI, and WMV.

-

Filmyzilla claims to provide high-quality and fast downloads of movies to its users. It also claims to update its library regularly with new releases and old classics. It has a user-friendly interface that allows you to search for movies by name, genre, year, or language. It also has a section for trending movies and a request option for users who want to request a specific movie.

-

However, before you visit Filmyzilla or any other similar website, you should be aware of some important facts and risks. First of all, downloading or streaming pirated content is illegal and unethical. It violates the intellectual property rights of the creators and distributors of the movies. It also harms the film industry by reducing its revenue and profits. By using Filmyzilla or any other pirated website, you are supporting piracy and contributing to its negative impact on the entertainment sector.

-

Secondly, using Filmyzilla or any other pirated website is unsafe and risky for your device and data. These websites often contain malware, viruses, spyware, adware, and other harmful software that can infect your device and compromise your security and privacy. These websites also display annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further. These websites also require you to disable your antivirus or firewall software or allow unknown sources to access your device, which can expose you to more dangers.

-

Therefore, we strongly advise you to avoid using Filmyzilla or any other pirated website to download or stream National Treasure: Book of Secrets in Hindi or any other movie. Instead, we recommend using legal and safe platforms such as Netflix, Amazon Prime Video, Disney Plus, or YouTube to watch National Treasure: Book of Secrets in Hindi or any other language. These platforms offer high-quality, fast streaming, have a wide variety of movies and shows to choose from, and respect the rights of the creators and distributors of the movies. You might have to pay a subscription or rental fee to use them, but it is worth it for the quality and security they provide.

-

Conclusion

-

National Treasure: Book of Secrets is a movie that can be enjoyed by anyone who likes history, mystery, and adventure. It is a sequel to the 2004 movie National Treasure and it follows the same formula of clues, puzzles, and treasure hunting. The movie has some exciting action scenes, some funny moments, and some interesting historical references. The movie also has some flaws, such as its historical inaccuracies, its implausibilities, and its similarities to its predecessor. The movie is not meant to be taken seriously or realistically. It is meant to be an entertaining and escapist fantasy that appeals to the fans of the genre.

-

If you want to watch National Treasure: Book of Secrets in Hindi for free, you might be tempted to visit Filmyzilla, a website that offers free downloads of pirated movies. However, we strongly advise you to avoid using Filmyzilla or any other pirated website to download or stream movies. These websites are illegal and unethical and they harm the film industry by violating the intellectual property rights of the creators and distributors of the movies. These websites are also unsafe and risky for your device and data as they contain malware, viruses, and other harmful software that can infect your device and compromise your security and privacy. These websites also display annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further.

-

Therefore, we recommend using legal and safe platforms such as Netflix, Amazon Prime Video, Disney Plus, or YouTube to watch National Treasure: Book of Secrets in Hindi or any other language. These platforms offer high-quality, fast streaming, have a wide variety of movies and shows to choose from, and respect the rights of the creators and distributors of the movies. You might have to pay a subscription or rental fee to use them, but it is worth it for the quality and security they provide.

-

FAQs

-

Here are some frequently asked questions and answers about National Treasure: Book of Secrets and Filmyzilla:

-
    -
  1. Q: Is National Treasure: Book of Secrets based on a true story?
    -A: No, National Treasure: Book of Secrets is not based on a true story. It is a fictional story that uses some historical figures and events as inspiration.
  2. -
  3. Q: Is there a third movie in the National Treasure franchise?
    -A: Yes, there is a third movie in the National Treasure franchise in development. It was announced in January 2020 that Chris Bremner was hired to write the script for National Treasure 3. However, there is no official release date or cast information yet.
  4. -
  5. Q: Is Filmyzilla legal?
    -A: No, Filmyzilla is not legal. It is a website that offers free downloads of pirated movies that violate the intellectual property rights of the creators and distributors of the movies.
  6. -
  7. Q: Is Filmyzilla safe?
    -A: No, Filmyzilla is not safe. It is a website that contains malware, viruses, and other harmful software that can infect your device and compromise your security and privacy. It also displays annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further.
  8. -
  9. Q: What are some alternatives to Filmyzilla?
    -A: Some alternatives to Filmyzilla include Netflix, Amazon Prime Video, Disney Plus, and YouTube. These are legal and safe platforms that offer high-quality and fast streaming of movies. They also have a variety of movies and shows to choose from and they respect the rights of the creators and distributors of the movies.
  10. -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md b/spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md deleted file mode 100644 index 5563894eeed2020382b221d17c3ddb9a42e241b2..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md +++ /dev/null @@ -1,101 +0,0 @@ -
-

Dragon Ball Z Kakarot: How to Download and Play on Android

-

If you are a fan of the Dragon Ball anime and manga series, you might have heard of Dragon Ball Z Kakarot, a role-playing game that lets you relive the epic saga of Goku and his friends. In this game, you can explore the vast open world of Dragon Ball, fight against powerful enemies, and experience the story from different perspectives. But did you know that you can also play this game on your Android device? In this article, we will show you how to download and play Dragon Ball Z Kakarot on Android using APKPure, a reliable and safe source for Android games.

-

What is Dragon Ball Z Kakarot?

-

Dragon Ball Z Kakarot is a game developed by CyberConnect2 and published by Bandai Namco Entertainment in 2020. It is based on the Dragon Ball Z anime series, which follows the adventures of Goku, a Saiyan warrior who protects Earth from various threats. The game covers the four main sagas of the series: Saiyan Saga, Frieza Saga, Cell Saga, and Buu Saga. You can play as Goku and other characters, such as Vegeta, Gohan, Piccolo, and Trunks.

-

dragon ball z kakarot download apkpure


Download 🆗 https://jinyurl.com/2uNSMb



-

The game features a large open world that you can explore by flying, driving, fishing, eating, and more. You can also interact with various characters from the series, complete side quests, collect items, and upgrade your skills. The game also has a dynamic combat system that allows you to unleash powerful attacks and transformations. You can also use support characters to assist you in battle.

-

Why download Dragon Ball Z Kakarot from APKPure?

-

APKPure is a website that offers free and safe downloads of Android games and apps. You can find thousands of games from different genres and categories on APKPure, including popular titles like PUBG Mobile, Genshin Impact, Among Us, and more. You can also discover new and trending games that are not available on Google Play Store.

-

One of the advantages of using APKPure is that it provides fast and secure downloads. You don't need to worry about viruses or malware when downloading from APKPure. You also don't need to register or sign up to use APKPure. You can simply search for the game you want and download it with one click.

-

Another benefit of using APKPure is that it supports multiple languages and regions. You can choose the language and region that suits you best when browsing APKPure. You can also find games that are compatible with your device's specifications and preferences.

-

How to download and install Dragon Ball Z Kakarot from APKPure

-

To download and install Dragon Ball Z Kakarot from APKPure, you need to follow these simple steps:

-
    -
  1. Go to the APKPure website in your browser.
  2. -
  3. Type "dragon ball z kakarot" in the search box and press enter.
  4. -
  5. Select the game from the list of results.
  6. -
  7. Click on the "Download" button and wait for the download to finish.
  8. -
  9. Once the download is complete, open the file manager app on your device and locate the downloaded file.
  10. -
  11. Tap on the file and allow the installation of unknown sources if prompted.
  12. -
  13. Follow the instructions on the screen to install the game.
  14. -
  15. Launch the game and enjoy!
  16. -
-

How to play Dragon Ball Z Kakarot on Android

-

To play Dragon Ball Z Kakarot on Android, you need a compatible device and a stable internet connection. The game requires at least 4 GB of RAM, 5 GB of free storage space, and Android 7.0 or higher as your operating system. It may not run smoothly on low-end devices or devices with insufficient memory.

The game is easy to play with touch controls. You can move your character with the virtual joystick on the left side of the screen and use the buttons on the right side to perform actions such as attacking, dodging, charging, and using items. You can switch between characters by tapping their icons in the top left corner of the screen, and open the menu by tapping the three dots in the top right corner.

The game follows the story of Dragon Ball Z, so you can expect to encounter many familiar scenes and characters. You can also explore the world and find hidden secrets and collectibles, level up your characters by completing quests, fighting enemies, and training, and customize them by equipping skills, items, and costumes.

Gameplay tips and tricks for beginners and advanced players

-

Here are some gameplay tips and tricks that can help you enjoy Dragon Ball Z Kakarot more:

-
    -
  • Use the map to find your objectives and waypoints. You can also use the map to fast travel to different locations.
  • -
  • Collect Z orbs and D medals as you explore. Z orbs are used to upgrade your skills, while D medals are used to unlock new skills.
  • -
  • Interact with NPCs and complete side quests. They can give you rewards such as items, money, and experience.
  • -
  • Use the community board to activate bonuses and perks. You can place different characters in different communities and increase their friendship levels.
  • -
  • Use the training grounds to learn new skills and techniques. You can also fight against past enemies and bosses to test your skills.
  • -
  • Use the cooking system to prepare meals that boost your stats and health. You can also eat at restaurants or campsites for temporary buffs.
  • -
  • Use the transformation system to gain an edge in battle. You can transform into different forms such as Super Saiyan, Super Saiyan 2, Super Saiyan 3, and more.
  • -
  • Use the support system to get help from your allies. You can call them to assist you in combat or switch with them if you are low on health.
  • -
  • Use the combo system to deal more damage and stun your enemies. You can chain different attacks and skills together for devastating effects.
  • -
  • Use the ki blast system to attack from a distance or break your enemy's guard. You can also charge your ki by holding down the attack button.
  • -
-

Conclusion

-

Dragon Ball Z Kakarot is a game that every Dragon Ball fan should try: it lets you experience the story of Dragon Ball Z in a new and immersive way. You can download and play this game on your Android device by using APKPure, a website that offers free and safe downloads of Android games and apps. APKPure has many advantages, such as fast and secure downloads, support for multiple languages and regions, and compatibility with various devices. To play Dragon Ball Z Kakarot on Android, you just need to follow the steps we mentioned above and enjoy the game.

-

If you have any questions or feedback about Dragon Ball Z Kakarot or APKPure, feel free to leave a comment below. We would love to hear from you!

-

dragon ball z kakarot mobile apk download
-dragon ball z kakarot android apk free download
-dragon ball z kakarot apk obb download for android
-dragon ball z kakarot apk mod download
-dragon ball z kakarot apk ios download
-dragon ball z kakarot game download apkpure
-dragon ball z kakarot apk data download
-dragon ball z kakarot apk offline download
-dragon ball z kakarot apk latest version download
-dragon ball z kakarot apk full game download
-dragon ball z kakarot apkpure free download
-dragon ball z kakarot apk+obb free download
-dragon ball z kakarot apk unlimited money download
-dragon ball z kakarot apk rexdl download
-dragon ball z kakarot apk revdl download
-dragon ball z kakarot apkpure mod download
-dragon ball z kakarot apkpure hack download
-dragon ball z kakarot apkpure cheats download
-dragon ball z kakarot apkpure unlocked download
-dragon ball z kakarot apkpure update download
-dragon ball z kakarot apkpure new version download
-dragon ball z kakarot apkpure online download
-dragon ball z kakarot apkpure offline mode download
-dragon ball z kakarot apkpure english version download
-dragon ball z kakarot apkpure no verification download
-how to download dragon ball z kakarot apk on android
-how to install dragon ball z kakarot apk on android
-how to play dragon ball z kakarot apk on android
-how to get dragon ball z kakarot apk for free on android
-how to update dragon ball z kakarot apk on android
-how to run dragon ball z kakarot apk on android
-how to fix dragon ball z kakarot apk on android
-how to hack dragon ball z kakarot apk on android
-how to mod dragon ball z kakarot apk on android
-how to cheat in dragon ball z kakarot apk on android
-how to unlock all characters in dragon ball z kakarot apk on android
-how to save game in dragon ball z kakarot apk on android
-how to change language in dragon ball z kakarot apk on android
-how to connect controller in dragon ball z kakarot apk on android
-how to play online in dragon ball z kakarot apk on android
-is there a dragon ball z kakarot apk for android
-is dragon ball z kakarot apk real or fake for android
-is dragon ball z kakarot apk safe or virus for android
-is dragon ball z kakarot apk worth it for android
-is dragon ball z kakarot apk compatible with my device for android
-is dragon ball z kakarot apk legal or illegal for android
-is dragon ball z kakarot apk official or unofficial for android
-is dragon ball z kakarot apk original or modded for android
-is dragon ball z kakarot apk working or not for android

-

Frequently Asked Questions

-

Here are some frequently asked questions about Dragon Ball Z Kakarot and APKPure:

-
    -
  1. Is Dragon Ball Z Kakarot free to play?
    No, Dragon Ball Z Kakarot is not a free-to-play game. It is a paid game that costs $59.99 on Steam and $39.99 on PlayStation 4 and Xbox One. However, you can download it for free from APKPure if you have an Android device.
  2. -
  3. Is Dragon Ball Z Kakarot online or offline?
    Dragon Ball Z Kakarot is mainly an offline game that does not require an internet connection to play. However, some features such as online events, leaderboards, achievements, and updates may require an internet connection.
  4. -
  5. Is Dragon Ball Z Kakarot multiplayer or single-player?
    Dragon Ball Z Kakarot is a single-player game that does not have a multiplayer mode. However, you can play with other players online in some events such as raids, boss battles, and tournaments.
  6. -
  7. Is APKPure safe to use?
    Yes, APKPure is safe to use as it does not contain any viruses or malware. APKPure also verifies the authenticity and integrity of the files it provides. You can trust APKPure to download and install Android games and apps without any worries.
  8. -
  9. How to update Dragon Ball Z Kakarot on Android?
    To update Dragon Ball Z Kakarot on Android, you need to visit APKPure website again and check if there is a new version available. If there is, you can download and install it over the existing one. You can also enable the auto-update feature on APKPure app to get the latest updates automatically.
  10. -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/232labs/VToonify/vtoonify/model/encoder/encoders/helpers.py b/spaces/232labs/VToonify/vtoonify/model/encoder/encoders/helpers.py deleted file mode 100644 index b51fdf97141407fcc1c9d249a086ddbfd042469f..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/encoder/encoders/helpers.py +++ /dev/null @@ -1,119 +0,0 @@ -from collections import namedtuple -import torch -from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module - -""" -ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Flatten(Module): - def forward(self, input): - return input.view(input.size(0), -1) - - -def l2_norm(input, axis=1): - norm = torch.norm(input, 2, axis, True) - output = torch.div(input, norm) - return output - - -class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): - """ A named tuple describing a ResNet block. """ - - -def get_block(in_channel, depth, num_units, stride=2): - return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] - - -def get_blocks(num_layers): - if num_layers == 50: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=4), - get_block(in_channel=128, depth=256, num_units=14), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 100: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=13), - get_block(in_channel=128, depth=256, num_units=30), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 152: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=8), - get_block(in_channel=128, depth=256, num_units=36), - get_block(in_channel=256, depth=512, num_units=3) - ] - else: - raise ValueError("Invalid number of layers: {}. 
Must be one of [50, 100, 152]".format(num_layers)) - return blocks - - -class SEModule(Module): - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.avg_pool = AdaptiveAvgPool2d(1) - self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) - self.relu = ReLU(inplace=True) - self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) - self.sigmoid = Sigmoid() - - def forward(self, x): - module_input = x - x = self.avg_pool(x) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class bottleneck_IR(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut - - -class bottleneck_IR_SE(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR_SE, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), - PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), - BatchNorm2d(depth), - SEModule(depth, 16) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut diff --git a/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/index.html b/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/index.html deleted file mode 100644 index 42ce24f3a2fbd696921cf991e1df1cc0e481dc60..0000000000000000000000000000000000000000 --- a/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/index.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - Wizardlm-13b-v1.2.Q4_0.gguf - - -

Wizardlm-13b-v1.2.Q4_0.gguf

-

- With the utilization of the - llama-cpp-python - package, we are excited to introduce the GGUF model hosted in the Hugging - Face Docker Spaces, made accessible through an OpenAI-compatible API. This - space includes comprehensive API documentation to facilitate seamless - integration. -
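For illustration only, here is a minimal sketch of how a client might call such an OpenAI-compatible endpoint once a Space like this is running. The Space URL, API key, and model name below are placeholders and assumptions, not values taken from this repository; the sketch assumes the Space serves llama-cpp-python's bundled OpenAI-compatible server, which exposes the standard /v1/chat/completions route.

```python
# Hypothetical client sketch: base_url and model name are placeholders, not
# values from this Space. Assumes the Space runs llama-cpp-python's
# OpenAI-compatible server (standard /v1/chat/completions endpoint).
from openai import OpenAI

client = OpenAI(
    base_url="https://your-space.hf.space/v1",  # placeholder Space URL
    api_key="not-needed",  # llama.cpp-style servers usually ignore the key
)

response = client.chat.completions.create(
    model="wizardlm-13b-v1.2.Q4_0.gguf",  # assumed model identifier
    messages=[{"role": "user", "content": "Summarize what a GGUF file is in one sentence."}],
    max_tokens=64,
)
print(response.choices[0].message.content)
```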

- -

- If you find this resource valuable, your support in the form of starring - the space would be greatly appreciated. Your engagement plays a vital role - in furthering the application for a community GPU grant, ultimately - enhancing the capabilities and accessibility of this space. -

- - diff --git a/spaces/AIFILMS/image-to-sound-fx/app.py b/spaces/AIFILMS/image-to-sound-fx/app.py deleted file mode 100644 index 9044639a5b247e132b3abebf90d61371ab89d806..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/image-to-sound-fx/app.py +++ /dev/null @@ -1,125 +0,0 @@ -import gradio as gr -import os -import time -from moviepy.editor import * -from share_btn import community_icon_html, loading_icon_html, share_js - -#token = os.environ.get('HF_TOKEN') -caption = gr.Blocks.load(name="spaces/laion/CoCa") -audio_gen = gr.Blocks.load(name="spaces/haoheliu/audioldm-text-to-audio-generation") - -ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)" - -def input_changes(input_img): - - if input_img == None: - return manual_cap.update(value="",placeholder=ph_message), caption_output.update(value=None), sound_output.update(value=None) - else: - cap = caption(input_img, fn_index=0) - print("CoCa caption: '" + cap + "' • ") - ph_update = "CoCa caption: '" + cap + "' • " - - return manual_cap.update(value="",placeholder=f"{ph_update}{ph_message}"), caption_output.update(value=cap), sound_output.update(value=None) - -def infer(image_input, manual_caption, duration_in, seed, caption_output): - - print(duration_in) - if manual_caption == "": - cap = caption_output - #cap = caption(image_input, fn_index=0) - #print("CoCa caption: '" + cap + "' • ") - #ph_update = "CoCa caption: '" + cap + "' • " - else: - cap = manual_caption - print("manual caption: " + cap) - ph_update="" - - sound = audio_gen(cap, duration_in, 2.5, seed, 3, fn_index=0) - - #return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"), gr.Group.update(visible=True) - return cap, sound[1], gr.Group.update(visible=True) - -title = """ -
-
-

- Image to Sound Effect -

-
-

- Convert an image to a corresponding sound effect generated through CoCa Image Captioning & AudioLDM -

-
-""" - -article = """ - - - -
-

You may also like:

- -
- - - - - - - - - - - - - -
-
-""" - -with gr.Blocks(css="style.css") as demo: - with gr.Column(elem_id="col-container"): - - gr.HTML(title) - - input_img = gr.Image(type="filepath", elem_id="input-img") - - with gr.Column(): - manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=3, placeholder=ph_message) - with gr.Row(): - duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration") - seed_in = gr.Slider(label="Seed", value=440, minimum=45, maximum=10000, step=1) - - caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption") - sound_output = gr.Audio(label="Result", elem_id="sound-output") - - generate = gr.Button("Generate SFX from Image") - - with gr.Group(elem_id="share-btn-container", visible=False) as share_group: - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - gr.HTML(article) - - change_out = [manual_cap, caption_output, sound_output] - input_img.change(input_changes, input_img, change_out, queue=False) - - - - generate.click(infer, inputs=[input_img, manual_cap, duration_in, seed_in, caption_output], outputs=[caption_output, sound_output, share_group], api_name="i2fx") - share_button.click(None, [], [], _js=share_js) - -demo.queue(max_size=32).launch(debug=True) diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py deleted file mode 100644 index 96ccf3e709b62e0548572ea424bb03a1a67a4b2e..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .factory import list_models, create_model, create_model_and_transforms, add_model_config -from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics -from .model import CLAP, CLAPTextCfg, CLAPVisionCfg, CLAPAudioCfp, convert_weights_to_fp16, trace_model -from .openai import load_openai_model, list_openai_models -from .pretrained import list_pretrained, list_pretrained_tag_models, list_pretrained_model_tags,\ - get_pretrained_url, download_pretrained -from .tokenizer import SimpleTokenizer, tokenize -from .transform import image_transform diff --git a/spaces/AIGText/GlyphControl/ldm/modules/ema.py b/spaces/AIGText/GlyphControl/ldm/modules/ema.py deleted file mode 100644 index d1488d699231e6712d09c0634854ac91d3a9b603..0000000000000000000000000000000000000000 --- a/spaces/AIGText/GlyphControl/ldm/modules/ema.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch -from torch import nn - - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, init_num_updates = 0, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(init_num_updates, dtype=torch.int) if use_num_upates - else torch.tensor(-1, dtype=torch.int)) # 0 - - for name, p in model.named_parameters(): - if p.requires_grad: - # remove as '.'-character is not allowed in buffers - s_name = name.replace('.', '') - self.m_name2s_name.update({name: s_name}) - self.register_buffer(s_name, p.clone().detach().data) - - self.collected_params = [] - - def reset_num_updates(self): - del self.num_updates - 
self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int)) - - def forward(self, model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. - """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts deleted file mode 100644 index e935d719c8c29eb5e4efc30812f61b5f44716923..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts +++ /dev/null @@ -1,15 +0,0 @@ -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#iterating_over_async_generators -export async function* streamToAsyncIterable( - stream: ReadableStream -): AsyncIterableIterator { - const reader = stream.getReader(); - try { - while (true) { - const { done, value } = await reader.read(); - if (done) return; - yield value; - } - } finally { - reader.releaseLock(); - } -} diff --git a/spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh b/spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh deleted file mode 100644 index c319a83cfbdf09bea634c3bd9fca737c0b1dd505..0000000000000000000000000000000000000000 --- a/spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: 
text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile b/spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile deleted file mode 100644 index 0155618f475104e9858b81470339558156c94e13..0000000000000000000000000000000000000000 --- a/spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM gcr.io/google-appengine/python - -# Create a virtualenv for dependencies. This isolates these packages from -# system-level packages. -# Use -p python3 or -p python3.7 to select python version. Default is version 2. -RUN virtualenv /env -p python3 - -# Setting these environment variables are the same as running -# source /env/bin/activate. -ENV VIRTUAL_ENV /env -ENV PATH /env/bin:$PATH - -RUN apt-get update && apt-get install -y python-opencv - -# Copy the application's requirements.txt and run pip to install all -# dependencies into the virtualenv. -ADD requirements.txt /app/requirements.txt -RUN pip install -r /app/requirements.txt - -# Add the application source code. -ADD . /app - -# Run a WSGI server to serve the application. gunicorn must be declared as -# a dependency in requirements.txt. -CMD gunicorn -b :$PORT main:app diff --git a/spaces/AgentVerse/agentVerse/ui/README.md b/spaces/AgentVerse/agentVerse/ui/README.md deleted file mode 100644 index acfff8c574d8da6d78e65b684d6ed0a043d76a07..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/README.md +++ /dev/null @@ -1 +0,0 @@ -# Work in progress \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts deleted file mode 100644 index d85c7816afe5fa3eedae1ac6208a08e211c0851c..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -import Base from '../base/Base'; -export default class Clock extends Base { } \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts deleted file mode 100644 index e95eba9ba06402ea6929098aa01be9747043d91a..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts +++ /dev/null @@ -1,8 +0,0 @@ -// import * as Phaser from 'phaser'; -import Container from "./Container"; - -export default function ( - x?: number, y?: number, - width?: number, height?: number, - children?: Phaser.GameObjects.GameObject[] -): Container; \ No newline at end of file diff --git a/spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py b/spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py deleted file mode 100644 index ef5d9b839788d662a234fa8ba01cf611508088fc..0000000000000000000000000000000000000000 --- a/spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py +++ /dev/null @@ -1,57 +0,0 @@ - -import os, sys -from libs import * -from .layers import * -from .modules import * -from .bblocks import * - -class LightSEResNet18(nn.Module): - def __init__(self, - base_channels = 
64, - ): - super(LightSEResNet18, self).__init__() - self.bblock = LightSEResBlock - self.stem = nn.Sequential( - nn.Conv1d( - 1, base_channels, - kernel_size = 15, padding = 7, stride = 2, - ), - nn.BatchNorm1d(base_channels), - nn.ReLU(), - nn.MaxPool1d( - kernel_size = 3, padding = 1, stride = 2, - ), - ) - self.stage_0 = nn.Sequential( - self.bblock(base_channels), - self.bblock(base_channels), - ) - - self.stage_1 = nn.Sequential( - self.bblock(base_channels*1, downsample = True), - self.bblock(base_channels*2), - ) - self.stage_2 = nn.Sequential( - self.bblock(base_channels*2, downsample = True), - self.bblock(base_channels*4), - ) - self.stage_3 = nn.Sequential( - self.bblock(base_channels*4, downsample = True), - self.bblock(base_channels*8), - ) - - self.pool = nn.AdaptiveAvgPool1d(1) - - def forward(self, - input, - ): - output = self.stem(input) - output = self.stage_0(output) - - output = self.stage_1(output) - output = self.stage_2(output) - output = self.stage_3(output) - - output = self.pool(output) - - return output \ No newline at end of file diff --git a/spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py b/spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py deleted file mode 100644 index f4ff135e549f0d6c72f733092f3df817cb178e01..0000000000000000000000000000000000000000 --- a/spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py +++ /dev/null @@ -1,889 +0,0 @@ -import argparse -import itertools -import math -import os -from pathlib import Path -from typing import Optional -import subprocess -import sys -import gc -import random - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch.utils.data import Dataset - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.utils.import_utils import is_xformers_available -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - - -logger = get_logger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - #required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - #required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - #required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default="", - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", 
type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
- ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - - parser.add_argument( - "--save_n_steps", - type=int, - default=1, - help=("Save the model every n global_steps"), - ) - - - parser.add_argument( - "--save_starting_step", - type=int, - default=1, - help=("The step from which it starts saving intermediary checkpoints"), - ) - - parser.add_argument( - "--stop_text_encoder_training", - type=int, - default=1000000, - help=("The step at which the text_encoder is no longer trained"), - ) - - - parser.add_argument( - "--image_captions_filename", - action="store_true", - help="Get captions from filename", - ) - - - parser.add_argument( - "--dump_only_text_encoder", - action="store_true", - default=False, - help="Dump only text encoder", - ) - - parser.add_argument( - "--train_only_unet", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--cache_latents", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--Session_dir", - type=str, - default="", - help="Current session directory", - ) - - - - - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - #if args.instance_data_dir is None: - # raise ValueError("You must specify a train data directory.") - - #if args.with_prior_preservation: - # if args.class_data_dir is None: - # raise ValueError("You must specify a data directory for class images.") - # if args.class_prompt is None: - # raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - args, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.image_captions_filename = None - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if args.image_captions_filename: - self.image_captions_filename = True - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - random.shuffle(self.class_images_path) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - path = self.instance_images_path[index % self.num_instance_images] - instance_image = Image.open(path) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - - instance_prompt = self.instance_prompt - - if self.image_captions_filename: - filename = Path(path).stem - pt=''.join([i for i in filename if not i.isdigit()]) - pt=pt.replace("_"," ") - pt=pt.replace("(","") - pt=pt.replace(")","") - pt=pt.replace("-","") - instance_prompt = pt - sys.stdout.write(" " +instance_prompt+" ") - sys.stdout.flush() - - - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - -class LatentsDataset(Dataset): - def __init__(self, latents_cache, text_encoder_cache): - self.latents_cache = latents_cache - self.text_encoder_cache = text_encoder_cache - - def __len__(self): - return len(self.latents_cache) - - def __getitem__(self, index): - return self.latents_cache[index], self.text_encoder_cache[index] - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - -def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict: - """ - Starts from base starting dict and then adds the remaining key values from updater replacing the values from - the first starting/base dict with the second updater dict. - - For later: how does d = {**d1, **d2} replace collision? - - :param starting_dict: - :param updater_dict: - :return: - """ - new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict - new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict - return new_dict - -def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace: - """ - - ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x - :param args1: - :param args2: - :return: - """ - # - the merged args - # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}. - merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2)) - args = argparse.Namespace(**merged_key_values_for_namespace) - return args - -def run_training(args_imported): - args_default = parse_args() - args = merge_args(args_default, args_imported) - print(args) - logging_dir = Path(args.output_dir, args.logging_dir) - i=args.save_starting_step - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - with torch.autocast("cuda"): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg") - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - if args.train_only_unet: - if os.path.exists(str(args.output_dir+"/text_encoder_trained")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained") - elif os.path.exists(str(args.output_dir+"/text_encoder")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - if is_xformers_available(): - try: - print("Enabling memory efficient attention with xformers...") - unet.enable_xformers_memory_efficient_attention() - except Exception as e: - logger.warning( - f"Could not enable memory efficient attention. 
Make sure xformers is installed correctly and a GPU is available: {e}" - ) - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler") - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - args=args, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. 
- overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. - vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - - if args.cache_latents: - latents_cache = [] - text_encoder_cache = [] - for batch in tqdm(train_dataloader, desc="Caching latents"): - with torch.no_grad(): - batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype) - batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True) - latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) - if args.train_text_encoder: - text_encoder_cache.append(batch["input_ids"]) - else: - text_encoder_cache.append(text_encoder(batch["input_ids"])[0]) - train_dataset = LatentsDataset(latents_cache, text_encoder_cache) - train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True) - - del vae - #if not args.train_text_encoder: - # del text_encoder - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - def bar(prg): - br='|'+'█' * prg + ' ' * (25-prg)+'|' - return br - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - with torch.no_grad(): - if args.cache_latents: - latents_dist = batch[0][0] - else: - latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist - latents = latents_dist.sample() * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - if(args.cache_latents): - if args.train_text_encoder: - encoder_hidden_states = text_encoder(batch[0][1])[0] - else: - encoder_hidden_states = batch[0][1] - else: - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. - model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - fll=round((global_step*100)/args.max_train_steps) - fll=round(fll/4) - pr=bar(fll) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - progress_bar.set_description_str("Progress:"+pr) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30: - if accelerator.is_main_process: - print(" " +" Freezing the text_encoder ..."+" ") - frz_dir=args.output_dir + "/text_encoder_frozen" - if os.path.exists(frz_dir): - subprocess.call('rm -r '+ frz_dir, shell=True) - os.mkdir(frz_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(frz_dir) - - if args.save_n_steps >= 200: - if global_step < args.max_train_steps and global_step+1==i: - ckpt_name = "_step_" + str(global_step+1) - save_dir = Path(args.output_dir+ckpt_name) - save_dir=str(save_dir) - save_dir=save_dir.replace(" ", "_") - if not os.path.exists(save_dir): - os.mkdir(save_dir) - inst=save_dir[16:] - inst=inst.replace(" ", "_") - print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt") - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(save_dir) - frz_dir=args.output_dir + "/text_encoder_frozen" - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True) - subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True) - chkpth=args.Session_dir+"/"+inst+".ckpt" - subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True) - subprocess.call('rm -r '+ save_dir, shell=True) - i=i+args.save_n_steps - - accelerator.wait_for_everyone() - - # Create the pipeline using using the trained modules and save it. 
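For reference, the objective assembled in the training loop above (the instance reconstruction term plus the weighted prior-preservation term) can be exercised in isolation with the short sketch below; the tensor shapes and the `prior_loss_weight` value are illustrative assumptions standing in for the real latent batch and `args.prior_loss_weight`.

```python
import torch
import torch.nn.functional as F

def prior_preservation_loss(model_pred, target, prior_loss_weight=1.0):
    # The batch is assumed to be [instance examples; class examples] concatenated
    # along dim 0, mirroring collate_fn when with_prior_preservation is enabled.
    model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
    target, target_prior = torch.chunk(target, 2, dim=0)
    # Per-example MSE on the instance half, averaged over channel/spatial dims.
    instance_loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
    # Plain mean MSE on the class (prior) half.
    prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
    return instance_loss + prior_loss_weight * prior_loss

# Dummy latents shaped (batch, channels, height, width); the real loop uses VAE latents.
pred, tgt = torch.randn(4, 4, 64, 64), torch.randn(4, 4, 64, 64)
print(prior_preservation_loss(pred, tgt, prior_loss_weight=1.0))
```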
- if accelerator.is_main_process: - if args.dump_only_text_encoder: - txt_dir=args.output_dir + "/text_encoder_trained" - if not os.path.exists(txt_dir): - os.mkdir(txt_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(txt_dir) - - elif args.train_only_unet: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(args.output_dir) - txt_dir=args.output_dir + "/text_encoder_trained" - subprocess.call('rm -r '+txt_dir, shell=True) - - else: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - frz_dir=args.output_dir + "/text_encoder_frozen" - pipeline.save_pretrained(args.output_dir) - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True) - subprocess.call('rm -r '+ frz_dir, shell=True) - - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - - accelerator.end_training() - del pipeline - torch.cuda.empty_cache() - gc.collect() -if __name__ == "__main__": - pass - #main() - diff --git a/spaces/Altinas/vits-uma-genshin-honkais/utils.py b/spaces/Altinas/vits-uma-genshin-honkais/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/Altinas/vits-uma-genshin-honkais/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - 
plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py deleted file mode 100644 index 49ab2304c146259cdb186457a92fd35cd0ebdfa5..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +++ /dev/null @@ -1,772 +0,0 @@ -# Copyright 2023 TencentARC and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import warnings -from dataclasses import dataclass -from typing import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import PIL -import torch -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - -from ...image_processor import VaeImageProcessor -from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - PIL_INTERPOLATION, - BaseOutput, - is_accelerate_available, - is_accelerate_version, - logging, - randn_tensor, - replace_example_docstring, -) -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker - - -@dataclass -class StableDiffusionAdapterPipelineOutput(BaseOutput): - """ - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, - num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
- nsfw_content_detected (`List[bool]`) - List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, or `None` if safety checking could not be performed. - """ - - images: Union[List[PIL.Image.Image], np.ndarray] - nsfw_content_detected: Optional[List[bool]] - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> from PIL import Image - >>> from diffusers.utils import load_image - >>> import torch - >>> from diffusers import StableDiffusionAdapterPipeline, T2IAdapter - - >>> image = load_image( - ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png" - ... ) - - >>> color_palette = image.resize((8, 8)) - >>> color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST) - - >>> adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16) - >>> pipe = StableDiffusionAdapterPipeline.from_pretrained( - ... "CompVis/stable-diffusion-v1-4", - ... adapter=adapter, - ... torch_dtype=torch.float16, - ... ) - - >>> pipe.to("cuda") - - >>> out_image = pipe( - ... "At night, glowing cubes in front of the beach", - ... image=color_palette, - ... ).images[0] - ``` -""" - - -def _preprocess_adapter_image(image, height, width): - if isinstance(image, torch.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] - image = [ - i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image - ] # expand [h, w] or [h, w, c] to [b, h, w, c] - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = image.transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - elif isinstance(image[0], torch.Tensor): - if image[0].ndim == 3: - image = torch.stack(image, dim=0) - elif image[0].ndim == 4: - image = torch.cat(image, dim=0) - else: - raise ValueError( - f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" - ) - return image - - -class StableDiffusionAdapterPipeline(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter - https://arxiv.org/abs/2302.08453 - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): - Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a - list, the outputs from each Adapter are added together to create one combined additional conditioning. - adapter_weights (`List[float]`, *optional*, defaults to None): - List of floats representing the weight which will be multiply to each adapter's output before adding them - together. - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. 
Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - adapter_weights: Optional[List[float]] = None, - requires_safety_checker: bool = True, - ): - super().__init__() - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - if isinstance(adapter, (list, tuple)): - adapter = MultiAdapter(adapter, adapter_weights=adapter_weights) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - adapter=adapter, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. 
This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.adapter, self.unet, self.vae]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - lora_scale (`float`, *optional*): - A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
- """ - # set lora scale so that monkey patched LoRA - # function of text encoder can correctly access it - if lora_scale is not None and isinstance(self, LoraLoaderMixin): - self._lora_scale = lora_scale - - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif prompt is not None and type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." 
- ) - else: - uncond_tokens = negative_prompt - - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is None: - has_nsfw_concept = None - else: - if torch.is_tensor(image): - feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") - else: - feature_extractor_input = self.image_processor.numpy_to_pil(image) - safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - warnings.warn( - "The decode_latents method is deprecated and will be removed in a future version. Please" - " use VaeImageProcessor instead", - FutureWarning, - ) - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents, return_dict=False)[0] - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs - def check_inputs( - self, - prompt, - height, - width, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - ): - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - def _default_height_width(self, height, width, image): - # NOTE: It is possible that a list of images have different - # dimensions for each image, so just checking the first image - # is not _exactly_ correct, but it is simple. - while isinstance(image, list): - image = image[0] - - if height is None: - if isinstance(image, PIL.Image.Image): - height = image.height - elif isinstance(image, torch.Tensor): - height = image.shape[-2] - - # round down to nearest multiple of `self.adapter.total_downscale_factor` - height = (height // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor - - if width is None: - if isinstance(image, PIL.Image.Image): - width = image.width - elif isinstance(image, torch.Tensor): - width = image.shape[-1] - - # round down to nearest multiple of `self.adapter.total_downscale_factor` - width = (width // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor - - return height, width - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]] = None, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - adapter_conditioning_scale: Union[float, List[float]] = 1.0, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): - The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the - type is specified as `Torch.FloatTensor`, it is passed to Adapter as is. PIL.Image.Image` can also be - accepted as an image. The control image is automatically resized to fit the output image. - height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. 
- guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] instead - of a plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under - `self.processor` in - [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). - adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): - The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the - residual in the original unet. 
If multiple adapters are specified in init, you can set the - corresponding scale as a list. - - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a - `tuple. When returning a tuple, the first element is a list with the generated images, and the second - element is a list of `bool`s denoting whether the corresponding generated image likely represents - "not-safe-for-work" (nsfw) content, according to the `safety_checker`. - """ - # 0. Default height and width to unet - height, width = self._default_height_width(height, width, image) - device = self._execution_device - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds - ) - - is_multi_adapter = isinstance(self.adapter, MultiAdapter) - if is_multi_adapter: - adapter_input = [_preprocess_adapter_image(img, height, width).to(device) for img in image] - n, c, h, w = adapter_input[0].shape - adapter_input = torch.stack([x.reshape([n * c, h, w]) for x in adapter_input]) - else: - adapter_input = _preprocess_adapter_image(image, height, width).to(device) - adapter_input = adapter_input.to(self.adapter.dtype) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - prompt_embeds = self._encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - ) - - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 5. Prepare latent variables - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7. 
Denoising loop - adapter_state = self.adapter(adapter_input) - for k, v in enumerate(adapter_state): - adapter_state[k] = v * adapter_conditioning_scale - if num_images_per_prompt > 1: - for k, v in enumerate(adapter_state): - adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) - if do_classifier_free_guidance: - for k, v in enumerate(adapter_state): - adapter_state[k] = torch.cat([v] * 2, dim=0) - - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - down_block_additional_residuals=[state.clone() for state in adapter_state], - ).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - if output_type == "latent": - image = latents - has_nsfw_concept = None - elif output_type == "pil": - # 8. Post-processing - image = self.decode_latents(latents) - - # 9. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - - # 10. Convert to PIL - image = self.numpy_to_pil(image) - else: - # 8. Post-processing - image = self.decode_latents(latents) - - # 9. 
Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionAdapterPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md b/spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md deleted file mode 100644 index 17eaa7d1dea393cbf9b8e3fd44c607b447812e6f..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Mixed Precision Training - -## Introduction - -[OTHERS] - -```latex -@article{micikevicius2017mixed, - title={Mixed precision training}, - author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, - journal={arXiv preprint arXiv:1710.03740}, - year={2017} -} -``` - -## Results and Models - -| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:------------:|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| Faster R-CNN | R-50 | pytorch | 1x | 3.4 | 28.8 | 37.5 | - |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) | -| Mask R-CNN | R-50 | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) | -| Retinanet | R-50 | pytorch | 1x | 2.8 | 31.6 | 36.4 | |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) | diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py deleted file mode 100644 index 04971226eb0fd6461b715358ac955dfb78102992..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_80k_ade20k.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - 
stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - dropout_ratio=-1, - num_classes=150, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - dropout_ratio=-1, - num_classes=150, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index b94974f865491731a1251e3e9736e01cbe81b06f..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: Feature request -about: Suggest an improvement or new feature for the web UI -title: '' -labels: 'enhancement' -assignees: '' - ---- - -**Description** - -A clear and concise description of what you want to be implemented. - -**Additional Context** - -If applicable, please provide any extra information, external links, or screenshots that could be useful. diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py deleted file mode 100644 index ff12f3afdc28be4ead12ffab90bd9fbd783514a2..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py +++ /dev/null @@ -1,83 +0,0 @@ -import os - -import gradio as gr - -# get the current directory of the script -current_dir = os.path.dirname(os.path.abspath(__file__)) - -# check if the bias_options.txt file exists, if not, create it -bias_file = os.path.join(current_dir, "bias_options.txt") -if not os.path.isfile(bias_file): - with open(bias_file, "w") as f: - f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*") - -# read bias options from the text file -with open(bias_file, "r") as f: - bias_options = [line.strip() for line in f.readlines()] - -params = { - "activate": True, - "bias string": " *I am so happy*", - "use custom string": False, -} - - -def input_modifier(string): - """ - This function is applied to your text inputs before - they are fed into the model. - """ - return string - - -def output_modifier(string): - """ - This function is applied to the model outputs. - """ - return string - - -def bot_prefix_modifier(string): - """ - This function is only applied in chat mode. It modifies - the prefix text for the Bot and can be used to bias its - behavior. 
- """ - if params['activate']: - if params['use custom string']: - return f'{string} {params["custom string"].strip()} ' - else: - return f'{string} {params["bias string"].strip()} ' - else: - return string - - -def ui(): - # Gradio elements - activate = gr.Checkbox(value=params['activate'], label='Activate character bias') - dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file') - use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown') - custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above') - - # Event functions to update the parameters in the backend - def update_bias_string(x): - if x: - params.update({"bias string": x}) - else: - params.update({"bias string": dropdown_string.get()}) - return x - - def update_custom_string(x): - params.update({"custom string": x}) - - dropdown_string.change(update_bias_string, dropdown_string, None) - custom_string.change(update_custom_string, custom_string, None) - activate.change(lambda x: params.update({"activate": x}), activate, None) - use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None) - - # Group elements together depending on the selected option - def bias_string_group(): - if use_custom_string.value: - return gr.Group([use_custom_string, custom_string]) - else: - return dropdown_string diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py deleted file mode 100644 index 81e47a914a1aa2e5458e18669d65ffb742f46fc6..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py +++ /dev/null @@ -1,217 +0,0 @@ -import os.path as osp -import tempfile - -import annotator.uniformer.mmcv as mmcv -import numpy as np -from annotator.uniformer.mmcv.utils import print_log -from PIL import Image - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CityscapesDataset(CustomDataset): - """Cityscapes dataset. - - The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is - fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. 
- """ - - CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle') - - PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], - [0, 80, 100], [0, 0, 230], [119, 11, 32]] - - def __init__(self, **kwargs): - super(CityscapesDataset, self).__init__( - img_suffix='_leftImg8bit.png', - seg_map_suffix='_gtFine_labelTrainIds.png', - **kwargs) - - @staticmethod - def _convert_to_label_id(result): - """Convert trainId to id for cityscapes.""" - if isinstance(result, str): - result = np.load(result) - import cityscapesscripts.helpers.labels as CSLabels - result_copy = result.copy() - for trainId, label in CSLabels.trainId2label.items(): - result_copy[result == trainId] = label.id - - return result_copy - - def results2img(self, results, imgfile_prefix, to_label_id): - """Write the segmentation results to images. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - imgfile_prefix (str): The filename prefix of the png files. - If the prefix is "somepath/xxx", - the png files will be named "somepath/xxx.png". - to_label_id (bool): whether convert output to label_id for - submission - - Returns: - list[str: str]: result txt files which contains corresponding - semantic segmentation images. - """ - mmcv.mkdir_or_exist(imgfile_prefix) - result_files = [] - prog_bar = mmcv.ProgressBar(len(self)) - for idx in range(len(self)): - result = results[idx] - if to_label_id: - result = self._convert_to_label_id(result) - filename = self.img_infos[idx]['filename'] - basename = osp.splitext(osp.basename(filename))[0] - - png_filename = osp.join(imgfile_prefix, f'{basename}.png') - - output = Image.fromarray(result.astype(np.uint8)).convert('P') - import cityscapesscripts.helpers.labels as CSLabels - palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) - for label_id, label in CSLabels.id2label.items(): - palette[label_id] = label.color - - output.putpalette(palette) - output.save(png_filename) - result_files.append(png_filename) - prog_bar.update() - - return result_files - - def format_results(self, results, imgfile_prefix=None, to_label_id=True): - """Format the results into dir (standard format for Cityscapes - evaluation). - - Args: - results (list): Testing results of the dataset. - imgfile_prefix (str | None): The prefix of images files. It - includes the file path and the prefix of filename, e.g., - "a/b/prefix". If not specified, a temp file will be created. - Default: None. - to_label_id (bool): whether convert output to label_id for - submission. Default: False - - Returns: - tuple: (result_files, tmp_dir), result_files is a list containing - the image paths, tmp_dir is the temporal directory created - for saving json/png files when img_prefix is not specified. 
- """ - - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: ' - f'{len(results)} != {len(self)}') - - if imgfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - imgfile_prefix = tmp_dir.name - else: - tmp_dir = None - result_files = self.results2img(results, imgfile_prefix, to_label_id) - - return result_files, tmp_dir - - def evaluate(self, - results, - metric='mIoU', - logger=None, - imgfile_prefix=None, - efficient_test=False): - """Evaluation in Cityscapes/default protocol. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file, - for cityscapes evaluation only. It includes the file path and - the prefix of filename, e.g., "a/b/prefix". - If results are evaluated with cityscapes protocol, it would be - the prefix of output png files. The output files would be - png images under folder "a/b/prefix/xxx.png", where "xxx" is - the image name of cityscapes. If not specified, a temp file - will be created for evaluation. - Default: None. - - Returns: - dict[str, float]: Cityscapes/default metrics. - """ - - eval_results = dict() - metrics = metric.copy() if isinstance(metric, list) else [metric] - if 'cityscapes' in metrics: - eval_results.update( - self._evaluate_cityscapes(results, logger, imgfile_prefix)) - metrics.remove('cityscapes') - if len(metrics) > 0: - eval_results.update( - super(CityscapesDataset, - self).evaluate(results, metrics, logger, efficient_test)) - - return eval_results - - def _evaluate_cityscapes(self, results, logger, imgfile_prefix): - """Evaluation in Cityscapes protocol. - - Args: - results (list): Testing results of the dataset. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file - - Returns: - dict[str: float]: Cityscapes evaluation results. 
-        """
-        try:
-            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
-        except ImportError:
-            raise ImportError('Please run "pip install cityscapesscripts" to '
-                              'install cityscapesscripts first.')
-        msg = 'Evaluating in Cityscapes style'
-        if logger is None:
-            msg = '\n' + msg
-        print_log(msg, logger=logger)
-
-        result_files, tmp_dir = self.format_results(results, imgfile_prefix)
-
-        if tmp_dir is None:
-            result_dir = imgfile_prefix
-        else:
-            result_dir = tmp_dir.name
-
-        eval_results = dict()
-        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
-
-        CSEval.args.evalInstLevelScore = True
-        CSEval.args.predictionPath = osp.abspath(result_dir)
-        CSEval.args.evalPixelAccuracy = True
-        CSEval.args.JSONOutput = False
-
-        seg_map_list = []
-        pred_list = []
-
-        # when evaluating with official cityscapesscripts,
-        # **_gtFine_labelIds.png is used
-        for seg_map in mmcv.scandir(
-                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
-            seg_map_list.append(osp.join(self.ann_dir, seg_map))
-            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
-
-        eval_results.update(
-            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
-
-        if tmp_dir is not None:
-            tmp_dir.cleanup()
-
-        return eval_results
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py b/spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py
deleted file mode 100644
index b84442cc93f7f9c30cb7311b8675d9124a6e8ec9..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py
+++ /dev/null
@@ -1,59 +0,0 @@
-path_sd15 = './models/v1-5-pruned.ckpt'
-path_sd15_with_control = './models/control_sd15_openpose.pth'
-path_input = './models/anything-v3-full.safetensors'
-path_output = './models/control_any3_openpose.pth'
-
-
-import os
-
-
-assert os.path.exists(path_sd15), 'Input path_sd15 does not exist!'
-assert os.path.exists(path_sd15_with_control), 'Input path_sd15_with_control does not exist!'
-assert os.path.exists(path_input), 'Input path_input does not exist!'
-assert os.path.exists(os.path.dirname(path_output)), 'Output folder does not exist!'
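-
-# Editorial summary of the transfer logic below: the VAE (first_stage_model) and
-# text encoder (cond_stage_model) weights are copied directly from the new base
-# checkpoint, while every other weight that has a counterpart in the new base
-# model receives the offset (new base weight - original SD 1.5 weight) on top of
-# the SD 1.5 ControlNet checkpoint; weights without a counterpart are kept as-is.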
- - -import torch -from share import * -from cldm.model import load_state_dict - - -sd15_state_dict = load_state_dict(path_sd15) -sd15_with_control_state_dict = load_state_dict(path_sd15_with_control) -input_state_dict = load_state_dict(path_input) - - -def get_node_name(name, parent_name): - if len(name) <= len(parent_name): - return False, '' - p = name[:len(parent_name)] - if p != parent_name: - return False, '' - return True, name[len(parent_name):] - - -keys = sd15_with_control_state_dict.keys() - -final_state_dict = {} -for key in keys: - is_first_stage, _ = get_node_name(key, 'first_stage_model') - is_cond_stage, _ = get_node_name(key, 'cond_stage_model') - if is_first_stage or is_cond_stage: - final_state_dict[key] = input_state_dict[key] - continue - p = sd15_with_control_state_dict[key] - is_control, node_name = get_node_name(key, 'control_') - if is_control: - sd15_key_name = 'model.diffusion_' + node_name - else: - sd15_key_name = key - if sd15_key_name in input_state_dict: - p_new = p + input_state_dict[sd15_key_name] - sd15_state_dict[sd15_key_name] - # print(f'Offset clone from [{sd15_key_name}] to [{key}]') - else: - p_new = p - # print(f'Direct clone to [{key}]') - final_state_dict[key] = p_new - -torch.save(final_state_dict, path_output) -print('Transferred model saved at ' + path_output) diff --git a/spaces/Artrajz/vits-simple-api/voice.py b/spaces/Artrajz/vits-simple-api/voice.py deleted file mode 100644 index 9cce5625519a1aedc0dae7e89be2513684daa4d4..0000000000000000000000000000000000000000 --- a/spaces/Artrajz/vits-simple-api/voice.py +++ /dev/null @@ -1,325 +0,0 @@ -import os -import librosa -import re -import numpy as np -import torch -import xml.etree.ElementTree as ET -import config -import soundfile as sf -from io import BytesIO -from graiax import silkcoder -from logger import logger -from contants import ModelType -from scipy.signal import resample_poly - - -# torch.set_num_threads(1) # 设置torch线程为1 - - -class TTS: - def __init__(self, voice_obj, voice_speakers, **kwargs): - self._voice_obj = voice_obj - self._voice_speakers = voice_speakers - self._strength_dict = {"x-weak": 0.25, "weak": 0.5, "Medium": 0.75, "Strong": 1, "x-strong": 1.25} - self._speakers_count = sum([len(self._voice_speakers[i]) for i in self._voice_speakers]) - self._vits_speakers_count = len(self._voice_speakers[ModelType.VITS.value]) - self._hubert_speakers_count = len(self._voice_speakers[ModelType.HUBERT_VITS.value]) - self._w2v2_speakers_count = len(self._voice_speakers[ModelType.W2V2_VITS.value]) - self._w2v2_emotion_count = kwargs.get("w2v2_emotion_count", 0) - self._bert_vits2_speakers_count = len(self._voice_speakers[ModelType.BERT_VITS2.value]) - self.dem = None - - # Initialization information - self.logger = logger - self.logger.info(f"torch:{torch.__version__} cuda_available:{torch.cuda.is_available()}") - self.logger.info(f'device:{kwargs.get("device")} device.type:{kwargs.get("device").type}') - - if getattr(config, "DIMENSIONAL_EMOTION_MODEL", None) != None: - try: - import audonnx - root = os.path.dirname(config.DIMENSIONAL_EMOTION_MODEL) - model_file = config.DIMENSIONAL_EMOTION_MODEL - self.dem = audonnx.load(root=root, model_file=model_file) - except Exception as e: - self.logger.warning(f"Load DIMENSIONAL_EMOTION_MODEL failed {e}") - - if self._vits_speakers_count != 0: self.logger.info(f"[{ModelType.VITS.value}] {self._vits_speakers_count} speakers") - if self._hubert_speakers_count != 0: self.logger.info(f"[{ModelType.HUBERT_VITS.value}] {self._hubert_speakers_count} 
speakers") - if self._w2v2_speakers_count != 0: self.logger.info(f"[{ModelType.W2V2_VITS.value}] {self._w2v2_speakers_count} speakers") - if self._bert_vits2_speakers_count != 0: self.logger.info( - f"[{ModelType.BERT_VITS2.value}] {self._bert_vits2_speakers_count} speakers") - self.logger.info(f"{self._speakers_count} speakers in total.") - if self._speakers_count == 0: - self.logger.warning(f"No model was loaded.") - - @property - def voice_speakers(self): - return self._voice_speakers - - @property - def speakers_count(self): - return self._speakers_count - - @property - def vits_speakers_count(self): - return self._vits_speakers_count - - @property - def hubert_speakers_count(self): - return self._hubert_speakers_count - - @property - def w2v2_speakers_count(self): - return self._w2v2_speakers_count - - @property - def w2v2_emotion_count(self): - return self._w2v2_emotion_count - - @property - def bert_vits2_speakers_count(self): - return self._bert_vits2_speakers_count - - def encode(self, sampling_rate, audio, format): - with BytesIO() as f: - if format.upper() == 'OGG': - sf.write(f, audio, sampling_rate, format="ogg") - return BytesIO(f.getvalue()) - elif format.upper() == 'SILK': - sf.write(f, audio, sampling_rate, format="wav") - return BytesIO(silkcoder.encode(f)) - elif format.upper() == 'MP3': - sf.write(f, audio, sampling_rate, format="mp3") - return BytesIO(f.getvalue()) - elif format.upper() == 'WAV': - sf.write(f, audio, sampling_rate, format="wav") - return BytesIO(f.getvalue()) - elif format.upper() == 'FLAC': - sf.write(f, audio, sampling_rate, format="flac") - return BytesIO(f.getvalue()) - else: - raise ValueError(f"Unsupported format:{format}") - - def convert_time_string(self, time_string): - time_value = float(re.findall(r'\d+\.?\d*', time_string)[0]) - time_unit = re.findall(r'[a-zA-Z]+', time_string)[0].lower() - - if time_unit.upper() == 'MS': - return time_value / 1000 - elif time_unit.upper() == 'S': - return time_value - elif time_unit.upper() == 'MIN': - return time_value * 60 - elif time_unit.upper() == 'H': - return time_value * 3600 - elif time_unit.upper() == 'D': - return time_value * 24 * 3600 # 不会有人真写D吧? 
-        else:
-            raise ValueError("Unsupported time unit: {}".format(time_unit))
-
-    def generate_audio_chunks(self, audio):
-        chunk_size = 4096
-        while True:
-            chunk = audio.read(chunk_size)
-            if not chunk:
-                break
-            yield chunk
-
-    def resample_audio(self, audio, orig_sr, target_sr):
-        if orig_sr == target_sr:
-            return audio
-
-        gcd = np.gcd(orig_sr, target_sr)
-        audio = resample_poly(audio, target_sr // gcd, orig_sr // gcd)
-
-        return audio
-
-    def parse_ssml(self, ssml):
-        root = ET.fromstring(ssml)
-        format = root.attrib.get("format", "wav")
-        voice_tasks = []
-        brk_count = 0
-        strength_dict = {"x-weak": 0.25, "weak": 0.5, "Medium": 0.75, "Strong": 1, "x-strong": 1.25}
-
-        for element in root.iter():
-            if element.tag == "voice":
-                id = int(element.attrib.get("id", root.attrib.get("id", config.ID)))
-                lang = element.attrib.get("lang", root.attrib.get("lang", config.LANG))
-                length = float(element.attrib.get("length", root.attrib.get("length", config.LENGTH)))
-                noise = float(element.attrib.get("noise", root.attrib.get("noise", config.NOISE)))
-                noisew = float(element.attrib.get("noisew", root.attrib.get("noisew", config.NOISEW)))
-                max = int(element.attrib.get("max", root.attrib.get("max", "0")))
-                # defaults to vits if not specified
-                model_type = element.attrib.get("model_type", root.attrib.get("model_type", "vits"))
-                # only w2v2-vits/emotion-vits support emotion
-                emotion = int(element.attrib.get("emotion", root.attrib.get("emotion", 0)))
-                # Bert-VITS2-specific parameter
-                sdp_ratio = int(element.attrib.get("sdp_ratio", root.attrib.get("sdp_ratio", config.SDP_RATIO)))
-
-                voice_element = ET.tostring(element, encoding='unicode')
-
-                pattern_voice = r'<voice.*?>(.*?)</voice>'
-                pattern_break = r'<break\s*?(.*?)\s*?/>'
-
-                matches_voice = re.findall(pattern_voice, voice_element)[0]
-                matches_break = re.split(pattern_break, matches_voice)
-                for match in matches_break:
-                    strength = re.search(r'\s*strength\s*=\s*[\'\"](.*?)[\'\"]', match)
-                    time = re.search(r'\s*time\s*=\s*[\'\"](.*?)[\'\"]', match)
-                    # break tag with a strength attribute
-                    if strength:
-                        brk = strength_dict[strength.group(1)]
-                        voice_tasks.append({"break": brk})
-                        brk_count += 1
-                    # break tag with a time attribute
-                    elif time:
-                        brk = self.convert_time_string(time.group(1))
-                        voice_tasks.append({"break": brk})
-                        brk_count += 1
-                    # an empty match means a bare break tag, default pause of 0.75s
-                    elif match == "":
-                        voice_tasks.append({"break": 0.75})
-                        brk_count += 1
-                    # everything inside the voice tag other than break is text
-                    else:
-                        voice_tasks.append({"id": id,
-                                            "text": match,
-                                            "lang": lang,
-                                            "length": length,
-                                            "noise": noise,
-                                            "noisew": noisew,
-                                            "max": max,
-                                            "model_type": model_type,
-                                            "emotion": emotion,
-                                            "sdp_ratio": sdp_ratio
-                                            })
-
-                # pause 0.75s at the end of each segment
-                voice_tasks.append({"break": 0.75})
-            elif element.tag == "break":
-                # brk_count > 0 means the break was already handled inside a voice tag
-                if brk_count > 0:
-                    brk_count -= 1
-                    continue
-                brk = strength_dict.get(element.attrib.get("strength"),
-                                        self.convert_time_string(element.attrib.get("time", "750ms")))
-                voice_tasks.append({"break": brk})
-
-        for i in voice_tasks:
-            self.logger.debug(i)
-
-        return voice_tasks, format
-
-    def process_ssml_infer_task(self, tasks, format):
-        audios = []
-        sampling_rates = []
-        last_sampling_rate = 22050
-        for task in tasks:
-            if task.get("break"):
-                audios.append(np.zeros(int(task.get("break") * 22050), dtype=np.int16))
-                sampling_rates.append(last_sampling_rate)
-            else:
-                model_type_str = task.get("model_type").upper()
-                if model_type_str not in [ModelType.VITS.value, ModelType.W2V2_VITS.value, ModelType.BERT_VITS2.value]:
-                    raise ValueError(f"Unsupported model type: {task.get('model_type')}")
-                model_type = ModelType(model_type_str)
-                voice_obj = 
self._voice_obj[model_type][task.get("id")][1] - real_id = self._voice_obj[model_type][task.get("id")][0] - task["id"] = real_id - sampling_rates.append(voice_obj.sampling_rate) - last_sampling_rate = voice_obj.sampling_rate - audio = voice_obj.get_audio(task) - audios.append(audio) - # 得到最高的采样率 - target_sr = max(sampling_rates) - # 所有音频要与最高采样率保持一致 - resampled_audios = [self.resample_audio(audio, sr, target_sr) for audio, sr in zip(audios, sampling_rates)] - audio = np.concatenate(resampled_audios, axis=0) - encoded_audio = self.encode(target_sr, audio, format) - return encoded_audio - - def vits_infer(self, task): - format = task.get("format", "wav") - voice_obj = self._voice_obj[ModelType.VITS][task.get("id")][1] - real_id = self._voice_obj[ModelType.VITS][task.get("id")][0] - task["id"] = real_id # Change to real id - sampling_rate = voice_obj.sampling_rate - audio = voice_obj.get_audio(task, auto_break=True) - encoded_audio = self.encode(sampling_rate, audio, format) - return encoded_audio - - def stream_vits_infer(self, task, fname=None): - format = task.get("format", "wav") - voice_obj = self._voice_obj[ModelType.VITS][task.get("id")][1] - task["id"] = self._voice_obj[ModelType.VITS][task.get("id")][0] - sampling_rate = voice_obj.sampling_rate - genertator = voice_obj.get_stream_audio(task, auto_break=True) - # audio = BytesIO() - for chunk in genertator: - encoded_audio = self.encode(sampling_rate, chunk, format) - for encoded_audio_chunk in self.generate_audio_chunks(encoded_audio): - yield encoded_audio_chunk - # if getattr(config, "SAVE_AUDIO", False): - # audio.write(encoded_audio.getvalue()) - # if getattr(config, "SAVE_AUDIO", False): - # path = f"{config.CACHE_PATH}/{fname}" - # utils.save_audio(audio.getvalue(), path) - - def hubert_vits_infer(self, task): - format = task.get("format", "wav") - voice_obj = self._voice_obj[ModelType.HUBERT_VITS][task.get("id")][1] - task["id"] = self._voice_obj[ModelType.HUBERT_VITS][task.get("id")][0] - sampling_rate = voice_obj.sampling_rate - audio = voice_obj.get_audio(task) - encoded_audio = self.encode(sampling_rate, audio, format) - return encoded_audio - - def w2v2_vits_infer(self, task): - format = task.get("format", "wav") - voice_obj = self._voice_obj[ModelType.W2V2_VITS][task.get("id")][1] - task["id"] = self._voice_obj[ModelType.W2V2_VITS][task.get("id")][0] - sampling_rate = voice_obj.sampling_rate - audio = voice_obj.get_audio(task, auto_break=True) - encoded_audio = self.encode(sampling_rate, audio, format) - return encoded_audio - - def vits_voice_conversion(self, task): - original_id = task.get("original_id") - target_id = task.get("target_id") - format = task.get("format") - - original_id_obj = int(self._voice_obj[ModelType.VITS][original_id][2]) - target_id_obj = int(self._voice_obj[ModelType.VITS][target_id][2]) - - if original_id_obj != target_id_obj: - raise ValueError(f"speakers are in diffrent VITS Model") - - task["original_id"] = int(self._voice_obj[ModelType.VITS][original_id][0]) - task["target_id"] = int(self._voice_obj[ModelType.VITS][target_id][0]) - - voice_obj = self._voice_obj[ModelType.VITS][original_id][1] - sampling_rate = voice_obj.sampling_rate - - audio = voice_obj.voice_conversion(task) - encoded_audio = self.encode(sampling_rate, audio, format) - return encoded_audio - - def get_dimensional_emotion_npy(self, audio): - if self.dem is None: - raise ValueError(f"Please configure DIMENSIONAL_EMOTION_MODEL path in config.py") - audio16000, sampling_rate = librosa.load(audio, sr=16000, mono=True) - emotion 
= self.dem(audio16000, sampling_rate)['hidden_states'] - emotion_npy = BytesIO() - np.save(emotion_npy, emotion.squeeze(0)) - emotion_npy.seek(0) - - return emotion_npy - - def bert_vits2_infer(self, task): - format = task.get("format", "wav") - voice_obj = self._voice_obj[ModelType.BERT_VITS2][task.get("id")][1] - task["id"] = self._voice_obj[ModelType.BERT_VITS2][task.get("id")][0] - sampling_rate = voice_obj.sampling_rate - audio = voice_obj.get_audio(task, auto_break=True) - encoded_audio = self.encode(sampling_rate, audio, format) - return encoded_audio diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py deleted file mode 100644 index 1bf26a94226d65089cbc1e50a40c719692517470..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py +++ /dev/null @@ -1,3360 +0,0 @@ -""" -Package resource API --------------------- - -A resource is a logical file contained within a package, or a logical -subdirectory thereof. The package resource API expects resource names -to have their path parts separated with ``/``, *not* whatever the local -path separator is. Do not use os.path operations to manipulate resource -names being passed into the API. - -The package resource API is designed to work with normal filesystem packages, -.egg files, and unpacked .egg files. It can also work in a limited way with -.zip files and with custom PEP 302 loaders that support the ``get_data()`` -method. - -This module is deprecated. Users are directed to -`importlib.resources `_ -and -`importlib.metadata `_ -instead. -""" - -import sys -import os -import io -import time -import re -import types -import zipfile -import zipimport -import warnings -import stat -import functools -import pkgutil -import operator -import platform -import collections -import plistlib -import email.parser -import errno -import tempfile -import textwrap -import inspect -import ntpath -import posixpath -import importlib -from pkgutil import get_importer - -try: - import _imp -except ImportError: - # Python 3.2 compatibility - import imp as _imp - -try: - FileExistsError -except NameError: - FileExistsError = OSError - -# capture these to bypass sandboxing -from os import utime - -try: - from os import mkdir, rename, unlink - - WRITE_SUPPORT = True -except ImportError: - # no write support, probably under GAE - WRITE_SUPPORT = False - -from os import open as os_open -from os.path import isdir, split - -try: - import importlib.machinery as importlib_machinery - - # access attribute to force import under delayed import mechanisms. - importlib_machinery.__name__ -except ImportError: - importlib_machinery = None - -from pip._internal.utils._jaraco_text import ( - yield_lines, - drop_comment, - join_continuation, -) - -from pip._vendor import platformdirs -from pip._vendor import packaging - -__import__('pip._vendor.packaging.version') -__import__('pip._vendor.packaging.specifiers') -__import__('pip._vendor.packaging.requirements') -__import__('pip._vendor.packaging.markers') -__import__('pip._vendor.packaging.utils') - -if sys.version_info < (3, 5): - raise RuntimeError("Python 3.5 or later is required") - -# declare some globals that will be defined later to -# satisfy the linters. 
-require = None -working_set = None -add_activation_listener = None -resources_stream = None -cleanup_resources = None -resource_dir = None -resource_stream = None -set_extraction_path = None -resource_isdir = None -resource_string = None -iter_entry_points = None -resource_listdir = None -resource_filename = None -resource_exists = None -_distribution_finders = None -_namespace_handlers = None -_namespace_packages = None - - -warnings.warn("pkg_resources is deprecated as an API", DeprecationWarning) - - -_PEP440_FALLBACK = re.compile(r"^v?(?P(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I) - - -class PEP440Warning(RuntimeWarning): - """ - Used when there is an issue with a version or specifier not complying with - PEP 440. - """ - - -parse_version = packaging.version.Version - - -_state_vars = {} - - -def _declare_state(vartype, **kw): - globals().update(kw) - _state_vars.update(dict.fromkeys(kw, vartype)) - - -def __getstate__(): - state = {} - g = globals() - for k, v in _state_vars.items(): - state[k] = g['_sget_' + v](g[k]) - return state - - -def __setstate__(state): - g = globals() - for k, v in state.items(): - g['_sset_' + _state_vars[k]](k, g[k], v) - return state - - -def _sget_dict(val): - return val.copy() - - -def _sset_dict(key, ob, state): - ob.clear() - ob.update(state) - - -def _sget_object(val): - return val.__getstate__() - - -def _sset_object(key, ob, state): - ob.__setstate__(state) - - -_sget_none = _sset_none = lambda *args: None - - -def get_supported_platform(): - """Return this platform's maximum compatible version. - - distutils.util.get_platform() normally reports the minimum version - of macOS that would be required to *use* extensions produced by - distutils. But what we want when checking compatibility is to know the - version of macOS that we are *running*. To allow usage of packages that - explicitly require a newer version of macOS, we must also know the - current version of the OS. - - If this condition occurs for any other platform with a version in its - platform strings, this function should be extended accordingly. 
- """ - plat = get_build_platform() - m = macosVersionString.match(plat) - if m is not None and sys.platform == "darwin": - try: - plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) - except ValueError: - # not macOS - pass - return plat - - -__all__ = [ - # Basic resource access and distribution/entry point discovery - 'require', - 'run_script', - 'get_provider', - 'get_distribution', - 'load_entry_point', - 'get_entry_map', - 'get_entry_info', - 'iter_entry_points', - 'resource_string', - 'resource_stream', - 'resource_filename', - 'resource_listdir', - 'resource_exists', - 'resource_isdir', - # Environmental control - 'declare_namespace', - 'working_set', - 'add_activation_listener', - 'find_distributions', - 'set_extraction_path', - 'cleanup_resources', - 'get_default_cache', - # Primary implementation classes - 'Environment', - 'WorkingSet', - 'ResourceManager', - 'Distribution', - 'Requirement', - 'EntryPoint', - # Exceptions - 'ResolutionError', - 'VersionConflict', - 'DistributionNotFound', - 'UnknownExtra', - 'ExtractionError', - # Warnings - 'PEP440Warning', - # Parsing functions and string utilities - 'parse_requirements', - 'parse_version', - 'safe_name', - 'safe_version', - 'get_platform', - 'compatible_platforms', - 'yield_lines', - 'split_sections', - 'safe_extra', - 'to_filename', - 'invalid_marker', - 'evaluate_marker', - # filesystem utilities - 'ensure_directory', - 'normalize_path', - # Distribution "precedence" constants - 'EGG_DIST', - 'BINARY_DIST', - 'SOURCE_DIST', - 'CHECKOUT_DIST', - 'DEVELOP_DIST', - # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', - 'IResourceProvider', - 'FileMetadata', - 'PathMetadata', - 'EggMetadata', - 'EmptyProvider', - 'empty_provider', - 'NullProvider', - 'EggProvider', - 'DefaultProvider', - 'ZipProvider', - 'register_finder', - 'register_namespace_handler', - 'register_loader_type', - 'fixup_namespace_packages', - 'get_importer', - # Warnings - 'PkgResourcesDeprecationWarning', - # Deprecated/backward compatibility only - 'run_main', - 'AvailableDistributions', -] - - -class ResolutionError(Exception): - """Abstract base for dependency resolution errors""" - - def __repr__(self): - return self.__class__.__name__ + repr(self.args) - - -class VersionConflict(ResolutionError): - """ - An already-installed version conflicts with the requested version. - - Should be initialized with the installed Distribution and the requested - Requirement. - """ - - _template = "{self.dist} is installed but {self.req} is required" - - @property - def dist(self): - return self.args[0] - - @property - def req(self): - return self.args[1] - - def report(self): - return self._template.format(**locals()) - - def with_context(self, required_by): - """ - If required_by is non-empty, return a version of self that is a - ContextualVersionConflict. - """ - if not required_by: - return self - args = self.args + (required_by,) - return ContextualVersionConflict(*args) - - -class ContextualVersionConflict(VersionConflict): - """ - A VersionConflict that accepts a third parameter, the set of the - requirements that required the installed Distribution. 
- """ - - _template = VersionConflict._template + ' by {self.required_by}' - - @property - def required_by(self): - return self.args[2] - - -class DistributionNotFound(ResolutionError): - """A requested distribution was not found""" - - _template = ( - "The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}" - ) - - @property - def req(self): - return self.args[0] - - @property - def requirers(self): - return self.args[1] - - @property - def requirers_str(self): - if not self.requirers: - return 'the application' - return ', '.join(self.requirers) - - def report(self): - return self._template.format(**locals()) - - def __str__(self): - return self.report() - - -class UnknownExtra(ResolutionError): - """Distribution doesn't have an "extra feature" of the given name""" - - -_provider_factories = {} - -PY_MAJOR = '{}.{}'.format(*sys.version_info) -EGG_DIST = 3 -BINARY_DIST = 2 -SOURCE_DIST = 1 -CHECKOUT_DIST = 0 -DEVELOP_DIST = -1 - - -def register_loader_type(loader_type, provider_factory): - """Register `provider_factory` to make providers for `loader_type` - - `loader_type` is the type or class of a PEP 302 ``module.__loader__``, - and `provider_factory` is a function that, passed a *module* object, - returns an ``IResourceProvider`` for that module. - """ - _provider_factories[loader_type] = provider_factory - - -def get_provider(moduleOrReq): - """Return an IResourceProvider for the named module or requirement""" - if isinstance(moduleOrReq, Requirement): - return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] - try: - module = sys.modules[moduleOrReq] - except KeyError: - __import__(moduleOrReq) - module = sys.modules[moduleOrReq] - loader = getattr(module, '__loader__', None) - return _find_adapter(_provider_factories, loader)(module) - - -def _macos_vers(_cache=[]): - if not _cache: - version = platform.mac_ver()[0] - # fallback for MacPorts - if version == '': - plist = '/System/Library/CoreServices/SystemVersion.plist' - if os.path.exists(plist): - if hasattr(plistlib, 'readPlist'): - plist_content = plistlib.readPlist(plist) - if 'ProductVersion' in plist_content: - version = plist_content['ProductVersion'] - - _cache.append(version.split('.')) - return _cache[0] - - -def _macos_arch(machine): - return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) - - -def get_build_platform(): - """Return this platform's string for platform-specific distributions - - XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and macOS. - """ - from sysconfig import get_platform - - plat = get_platform() - if sys.platform == "darwin" and not plat.startswith('macosx-'): - try: - version = _macos_vers() - machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % ( - int(version[0]), - int(version[1]), - _macos_arch(machine), - ) - except ValueError: - # if someone is running a non-Mac darwin system, this will fall - # through to the default implementation - pass - return plat - - -macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") -darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") -# XXX backward compat -get_platform = get_build_platform - - -def compatible_platforms(provided, required): - """Can code for the `provided` platform run on the `required` platform? - - Returns true if either platform is ``None``, or the platforms are equal. - - XXX Needs compatibility checks for Linux and other unixy OSes. 
- """ - if provided is None or required is None or provided == required: - # easy case - return True - - # macOS special cases - reqMac = macosVersionString.match(required) - if reqMac: - provMac = macosVersionString.match(provided) - - # is this a Mac package? - if not provMac: - # this is backwards compatibility for packages built before - # setuptools 0.6. All packages built after this point will - # use the new macOS designation. - provDarwin = darwinVersionString.match(provided) - if provDarwin: - dversion = int(provDarwin.group(1)) - macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if ( - dversion == 7 - and macosversion >= "10.3" - or dversion == 8 - and macosversion >= "10.4" - ): - return True - # egg isn't macOS or legacy darwin - return False - - # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3): - return False - - # is the required OS major update >= the provided one? - if int(provMac.group(2)) > int(reqMac.group(2)): - return False - - return True - - # XXX Linux and other platforms' special cases should go here - return False - - -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - -def get_distribution(dist): - """Return a current distribution object for a Requirement or string""" - if isinstance(dist, str): - dist = Requirement.parse(dist) - if isinstance(dist, Requirement): - dist = get_provider(dist) - if not isinstance(dist, Distribution): - raise TypeError("Expected string, Requirement, or Distribution", dist) - return dist - - -def load_entry_point(dist, group, name): - """Return `name` entry point of `group` for `dist` or raise ImportError""" - return get_distribution(dist).load_entry_point(group, name) - - -def get_entry_map(dist, group=None): - """Return the entry point map for `group`, or the full entry map""" - return get_distribution(dist).get_entry_map(group) - - -def get_entry_info(dist, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return get_distribution(dist).get_entry_info(group, name) - - -class IMetadataProvider: - def has_metadata(name): - """Does the package's distribution contain the named metadata?""" - - def get_metadata(name): - """The named metadata resource as a string""" - - def get_metadata_lines(name): - """Yield named metadata resource as list of non-blank non-comment lines - - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" - - def metadata_isdir(name): - """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" - - def metadata_listdir(name): - """List of metadata names in the directory (like ``os.listdir()``)""" - - def run_script(script_name, namespace): - """Execute the named script in the supplied namespace dictionary""" - - -class IResourceProvider(IMetadataProvider): - """An object that provides access to package resources""" - - def get_resource_filename(manager, resource_name): - """Return a true filesystem path for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_stream(manager, resource_name): - """Return a readable file-like object for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` - - `manager` must be an ``IResourceManager``""" - - def has_resource(resource_name): - """Does the package contain the named resource?""" - - def resource_isdir(resource_name): - """Is the named resource a directory? (like ``os.path.isdir()``)""" - - def resource_listdir(resource_name): - """List of resource names in the directory (like ``os.listdir()``)""" - - -class WorkingSet: - """A collection of active distributions on sys.path (or a similar list)""" - - def __init__(self, entries=None): - """Create working set from list of path entries (default=sys.path)""" - self.entries = [] - self.entry_keys = {} - self.by_key = {} - self.normalized_to_canonical_keys = {} - self.callbacks = [] - - if entries is None: - entries = sys.path - - for entry in entries: - self.add_entry(entry) - - @classmethod - def _build_master(cls): - """ - Prepare the master working set. - """ - ws = cls() - try: - from __main__ import __requires__ - except ImportError: - # The main program does not list any requirements - return ws - - # ensure the requirements are met - try: - ws.require(__requires__) - except VersionConflict: - return cls._build_from_requirements(__requires__) - - return ws - - @classmethod - def _build_from_requirements(cls, req_spec): - """ - Build a working set from a requirement spec. Rewrites sys.path. - """ - # try it without defaults already on sys.path - # by starting with an empty path - ws = cls([]) - reqs = parse_requirements(req_spec) - dists = ws.resolve(reqs, Environment()) - for dist in dists: - ws.add(dist) - - # add any missing entries from sys.path - for entry in sys.path: - if entry not in ws.entries: - ws.add_entry(entry) - - # then copy back to sys.path - sys.path[:] = ws.entries - return ws - - def add_entry(self, entry): - """Add a path item to ``.entries``, finding any distributions on it - - ``find_distributions(entry, True)`` is used to find distributions - corresponding to the path entry, and they are added. `entry` is - always appended to ``.entries``, even if it is already present. - (This is because ``sys.path`` can contain the same value more than - once, and the ``.entries`` of the ``sys.path`` WorkingSet should always - equal ``sys.path``.) - """ - self.entry_keys.setdefault(entry, []) - self.entries.append(entry) - for dist in find_distributions(entry, True): - self.add(dist, entry, False) - - def __contains__(self, dist): - """True if `dist` is the active distribution for its project""" - return self.by_key.get(dist.key) == dist - - def find(self, req): - """Find a distribution matching requirement `req` - - If there is an active distribution for the requested project, this - returns it as long as it meets the version requirement specified by - `req`. 
But, if there is an active distribution for the project and it - does *not* meet the `req` requirement, ``VersionConflict`` is raised. - If there is no active distribution for the requested project, ``None`` - is returned. - """ - dist = self.by_key.get(req.key) - - if dist is None: - canonical_key = self.normalized_to_canonical_keys.get(req.key) - - if canonical_key is not None: - req.key = canonical_key - dist = self.by_key.get(canonical_key) - - if dist is not None and dist not in req: - # XXX add more info - raise VersionConflict(dist, req) - return dist - - def iter_entry_points(self, group, name=None): - """Yield entry point objects from `group` matching `name` - - If `name` is None, yields all entry points in `group` from all - distributions in the working set, otherwise only ones matching - both `group` and `name` are yielded (in distribution order). - """ - return ( - entry - for dist in self - for entry in dist.get_entry_map(group).values() - if name is None or name == entry.name - ) - - def run_script(self, requires, script_name): - """Locate distribution for `requires` and run `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - self.require(requires)[0].run_script(script_name, ns) - - def __iter__(self): - """Yield distributions for non-duplicate projects in the working set - - The yield order is the order in which the items' path entries were - added to the working set. - """ - seen = {} - for item in self.entries: - if item not in self.entry_keys: - # workaround a cache issue - continue - - for key in self.entry_keys[item]: - if key not in seen: - seen[key] = 1 - yield self.by_key[key] - - def add(self, dist, entry=None, insert=True, replace=False): - """Add `dist` to working set, associated with `entry` - - If `entry` is unspecified, it defaults to the ``.location`` of `dist`. - On exit from this routine, `entry` is added to the end of the working - set's ``.entries`` (if it wasn't already present). - - `dist` is only added to the working set if it's for a project that - doesn't already have a distribution in the set, unless `replace=True`. - If it's added, any callbacks registered with the ``subscribe()`` method - will be called. - """ - if insert: - dist.insert_on(self.entries, entry, replace=replace) - - if entry is None: - entry = dist.location - keys = self.entry_keys.setdefault(entry, []) - keys2 = self.entry_keys.setdefault(dist.location, []) - if not replace and dist.key in self.by_key: - # ignore hidden distros - return - - self.by_key[dist.key] = dist - normalized_name = packaging.utils.canonicalize_name(dist.key) - self.normalized_to_canonical_keys[normalized_name] = dist.key - if dist.key not in keys: - keys.append(dist.key) - if dist.key not in keys2: - keys2.append(dist.key) - self._added_new(dist) - - def resolve( - self, - requirements, - env=None, - installer=None, - replace_conflicting=False, - extras=None, - ): - """List all distributions needed to (recursively) meet `requirements` - - `requirements` must be a sequence of ``Requirement`` objects. `env`, - if supplied, should be an ``Environment`` instance. If - not supplied, it defaults to all distributions available within any - entry or distribution in the working set. `installer`, if supplied, - will be invoked with each requirement that cannot be met by an - already-installed distribution; it should return a ``Distribution`` or - ``None``. 
- - Unless `replace_conflicting=True`, raises a VersionConflict exception - if - any requirements are found on the path that have the correct name but - the wrong version. Otherwise, if an `installer` is supplied it will be - invoked to obtain the correct version of the requirement and activate - it. - - `extras` is a list of the extras to be used with these requirements. - This is important because extra requirements may look like `my_req; - extra = "my_extra"`, which would otherwise be interpreted as a purely - optional requirement. Instead, we want to be able to assert that these - requirements are truly required. - """ - - # set up the stack - requirements = list(requirements)[::-1] - # set of processed requirements - processed = {} - # key -> dist - best = {} - to_activate = [] - - req_extras = _ReqExtras() - - # Mapping of requirement to set of distributions that required it; - # useful for reporting info about conflicts. - required_by = collections.defaultdict(set) - - while requirements: - # process dependencies breadth-first - req = requirements.pop(0) - if req in processed: - # Ignore cyclic or redundant dependencies - continue - - if not req_extras.markers_pass(req, extras): - continue - - dist = self._resolve_dist( - req, best, replace_conflicting, env, installer, required_by, to_activate - ) - - # push the new requirements onto the stack - new_requirements = dist.requires(req.extras)[::-1] - requirements.extend(new_requirements) - - # Register the new requirements needed by req - for new_requirement in new_requirements: - required_by[new_requirement].add(req.project_name) - req_extras[new_requirement] = req.extras - - processed[req] = True - - # return list of distros to activate - return to_activate - - def _resolve_dist( - self, req, best, replace_conflicting, env, installer, required_by, to_activate - ): - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None or (dist not in req and replace_conflicting): - ws = self - if env is None: - if dist is None: - env = Environment(self.entries) - else: - # Use an empty environment and workingset to avoid - # any further conflicts with the conflicting - # distribution - env = Environment([]) - ws = WorkingSet([]) - dist = best[req.key] = env.best_match( - req, ws, installer, replace_conflicting=replace_conflicting - ) - if dist is None: - requirers = required_by.get(req, None) - raise DistributionNotFound(req, requirers) - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - dependent_req = required_by[req] - raise VersionConflict(dist, req).with_context(dependent_req) - return dist - - def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): - """Find all activatable distributions in `plugin_env` - - Example usage:: - - distributions, errors = working_set.find_plugins( - Environment(plugin_dirlist) - ) - # add plugins+libs to sys.path - map(working_set.add, distributions) - # display errors - print('Could not load', errors) - - The `plugin_env` should be an ``Environment`` instance that contains - only distributions that are in the project's "plugin directory" or - directories. The `full_env`, if supplied, should be an ``Environment`` - contains all currently-available distributions. 
If `full_env` is not - supplied, one is created automatically from the ``WorkingSet`` this - method is called on, which will typically mean that every directory on - ``sys.path`` will be scanned for distributions. - - `installer` is a standard installer callback as used by the - ``resolve()`` method. The `fallback` flag indicates whether we should - attempt to resolve older versions of a plugin if the newest version - cannot be resolved. - - This method returns a 2-tuple: (`distributions`, `error_info`), where - `distributions` is a list of the distributions found in `plugin_env` - that were loadable, along with any other distributions that are needed - to resolve their dependencies. `error_info` is a dictionary mapping - unloadable plugin distributions to an exception instance describing the - error that occurred. Usually this will be a ``DistributionNotFound`` or - ``VersionConflict`` instance. - """ - - plugin_projects = list(plugin_env) - # scan project names in alphabetic order - plugin_projects.sort() - - error_info = {} - distributions = {} - - if full_env is None: - env = Environment(self.entries) - env += plugin_env - else: - env = full_env + plugin_env - - shadow_set = self.__class__([]) - # put all our entries in shadow_set - list(map(shadow_set.add, self)) - - for project_name in plugin_projects: - for dist in plugin_env[project_name]: - req = [dist.as_requirement()] - - try: - resolvees = shadow_set.resolve(req, env, installer) - - except ResolutionError as v: - # save error info - error_info[dist] = v - if fallback: - # try the next older version of project - continue - else: - # give up on this project, keep going - break - - else: - list(map(shadow_set.add, resolvees)) - distributions.update(dict.fromkeys(resolvees)) - - # success, no need to try any more versions of this project - break - - distributions = list(distributions) - distributions.sort() - - return distributions, error_info - - def require(self, *requirements): - """Ensure that distributions matching `requirements` are activated - - `requirements` must be a string or a (possibly-nested) sequence - thereof, specifying the distributions and versions required. The - return value is a sequence of the distributions that needed to be - activated to fulfill the requirements; all relevant distributions are - included, even if they were already activated in this working set. - """ - needed = self.resolve(parse_requirements(requirements)) - - for dist in needed: - self.add(dist) - - return needed - - def subscribe(self, callback, existing=True): - """Invoke `callback` for all distributions - - If `existing=True` (default), - call on all existing ones, as well. - """ - if callback in self.callbacks: - return - self.callbacks.append(callback) - if not existing: - return - for dist in self: - callback(dist) - - def _added_new(self, dist): - for callback in self.callbacks: - callback(dist) - - def __getstate__(self): - return ( - self.entries[:], - self.entry_keys.copy(), - self.by_key.copy(), - self.normalized_to_canonical_keys.copy(), - self.callbacks[:], - ) - - def __setstate__(self, e_k_b_n_c): - entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c - self.entries = entries[:] - self.entry_keys = keys.copy() - self.by_key = by_key.copy() - self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy() - self.callbacks = callbacks[:] - - -class _ReqExtras(dict): - """ - Map each requirement to the extras that demanded it. 
- """ - - def markers_pass(self, req, extras=None): - """ - Evaluate markers for req against each extra that - demanded it. - - Return False if the req has a marker and fails - evaluation. Otherwise, return True. - """ - extra_evals = ( - req.marker.evaluate({'extra': extra}) - for extra in self.get(req, ()) + (extras or (None,)) - ) - return not req.marker or any(extra_evals) - - -class Environment: - """Searchable snapshot of distributions on a search path""" - - def __init__( - self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR - ): - """Snapshot distributions available on a search path - - Any distributions found on `search_path` are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. - - `platform` is an optional string specifying the name of the platform - that platform-specific distributions must be compatible with. If - unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.6'``); - it defaults to the current version. - - You may explicitly set `platform` (and/or `python`) to ``None`` if you - wish to map *all* distributions, not just those compatible with the - running platform or Python version. - """ - self._distmap = {} - self.platform = platform - self.python = python - self.scan(search_path) - - def can_add(self, dist): - """Is distribution `dist` acceptable for this environment? - - The distribution must match the platform and python version - requirements specified when this environment was created, or False - is returned. - """ - py_compat = ( - self.python is None - or dist.py_version is None - or dist.py_version == self.python - ) - return py_compat and compatible_platforms(dist.platform, self.platform) - - def remove(self, dist): - """Remove `dist` from the environment""" - self._distmap[dist.key].remove(dist) - - def scan(self, search_path=None): - """Scan `search_path` for distributions usable in this environment - - Any distributions found are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. Only distributions conforming to - the platform/python version defined at initialization are added. - """ - if search_path is None: - search_path = sys.path - - for item in search_path: - for dist in find_distributions(item): - self.add(dist) - - def __getitem__(self, project_name): - """Return a newest-to-oldest list of distributions for `project_name` - - Uses case-insensitive `project_name` comparison, assuming all the - project's distributions use their project's name converted to all - lowercase as their key. - - """ - distribution_key = project_name.lower() - return self._distmap.get(distribution_key, []) - - def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added""" - if self.can_add(dist) and dist.has_version(): - dists = self._distmap.setdefault(dist.key, []) - if dist not in dists: - dists.append(dist) - dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - - def best_match(self, req, working_set, installer=None, replace_conflicting=False): - """Find distribution best matching `req` and usable on `working_set` - - This calls the ``find(req)`` method of the `working_set` to see if a - suitable distribution is already active. (This may raise - ``VersionConflict`` if an unsuitable version of the project is already - active in the specified `working_set`.) 
If a suitable distribution - isn't active, this method returns the newest distribution in the - environment that meets the ``Requirement`` in `req`. If no suitable - distribution is found, and `installer` is supplied, then the result of - calling the environment's ``obtain(req, installer)`` method will be - returned. - """ - try: - dist = working_set.find(req) - except VersionConflict: - if not replace_conflicting: - raise - dist = None - if dist is not None: - return dist - for dist in self[req.key]: - if dist in req: - return dist - # try to download/install - return self.obtain(req, installer) - - def obtain(self, requirement, installer=None): - """Obtain a distribution matching `requirement` (e.g. via download) - - Obtain a distro that matches requirement (e.g. via download). In the - base ``Environment`` class, this routine just returns - ``installer(requirement)``, unless `installer` is None, in which case - None is returned instead. This method is a hook that allows subclasses - to attempt other ways of obtaining a distribution before falling back - to the `installer` argument.""" - if installer is not None: - return installer(requirement) - - def __iter__(self): - """Yield the unique project names of the available distributions""" - for key in self._distmap.keys(): - if self[key]: - yield key - - def __iadd__(self, other): - """In-place addition of a distribution or environment""" - if isinstance(other, Distribution): - self.add(other) - elif isinstance(other, Environment): - for project in other: - for dist in other[project]: - self.add(dist) - else: - raise TypeError("Can't add %r to environment" % (other,)) - return self - - def __add__(self, other): - """Add an environment or distribution to an environment""" - new = self.__class__([], platform=None, python=None) - for env in self, other: - new += env - return new - - -# XXX backward compatibility -AvailableDistributions = Environment - - -class ExtractionError(RuntimeError): - """An error occurred extracting a resource - - The following attributes are available from instances of this exception: - - manager - The resource manager that raised this exception - - cache_path - The base directory for resource extraction - - original_error - The exception instance that caused extraction to fail - """ - - -class ResourceManager: - """Manage resource extraction and packages""" - - extraction_path = None - - def __init__(self): - self.cached_files = {} - - def resource_exists(self, package_or_requirement, resource_name): - """Does the named resource exist?""" - return get_provider(package_or_requirement).has_resource(resource_name) - - def resource_isdir(self, package_or_requirement, resource_name): - """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir(resource_name) - - def resource_filename(self, package_or_requirement, resource_name): - """Return a true filesystem path for specified resource""" - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name - ) - - def resource_stream(self, package_or_requirement, resource_name): - """Return a readable file-like object for specified resource""" - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name - ) - - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" - return get_provider(package_or_requirement).get_resource_string( - self, resource_name - ) - - def resource_listdir(self, package_or_requirement, 
resource_name): - """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir(resource_name) - - def extraction_error(self): - """Give an error message for problems extracting file(s)""" - - old_exc = sys.exc_info()[1] - cache_path = self.extraction_path or get_default_cache() - - tmpl = textwrap.dedent( - """ - Can't extract file(s) to egg cache - - The following error occurred while trying to extract file(s) - to the Python egg cache: - - {old_exc} - - The Python egg cache directory is currently set to: - - {cache_path} - - Perhaps your account does not have write access to this directory? - You can change the cache directory by setting the PYTHON_EGG_CACHE - environment variable to point to an accessible directory. - """ - ).lstrip() - err = ExtractionError(tmpl.format(**locals())) - err.manager = self - err.cache_path = cache_path - err.original_error = old_exc - raise err - - def get_cache_path(self, archive_name, names=()): - """Return absolute location in cache for `archive_name` and `names` - - The parent directory of the resulting path will be created if it does - not already exist. `archive_name` should be the base filename of the - enclosing egg (which may not be the name of the enclosing zipfile!), - including its ".egg" extension. `names`, if provided, should be a - sequence of path name parts "under" the egg's extraction location. - - This method should only be called by resource providers that need to - obtain an extraction location, and only for names they intend to - extract, as it tracks the generated names for possible cleanup later. - """ - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name + '-tmp', *names) - try: - _bypass_ensure_directory(target_path) - except Exception: - self.extraction_error() - - self._warn_unsafe_extraction_path(extract_path) - - self.cached_files[target_path] = 1 - return target_path - - @staticmethod - def _warn_unsafe_extraction_path(path): - """ - If the default extraction path is overridden and set to an insecure - location, such as /tmp, it opens up an opportunity for an attacker to - replace an extracted file with an unauthorized payload. Warn the user - if a known insecure location is used. - - See Distribute #375 for more details. - """ - if os.name == 'nt' and not path.startswith(os.environ['windir']): - # On Windows, permissions are generally restrictive by default - # and temp directories are not writable by other users, so - # bypass the warning. - return - mode = os.stat(path).st_mode - if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ( - "Extraction path is writable by group/others " - "and vulnerable to attack when " - "used with get_resource_filename ({path}). " - "Consider a more secure " - "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." - ).format(**locals()) - warnings.warn(msg, UserWarning) - - def postprocess(self, tempname, filename): - """Perform any platform-specific postprocessing of `tempname` - - This is where Mac header rewrites should be done; other platforms don't - have anything special they should do. - - Resource providers should call this method ONLY after successfully - extracting a compressed resource. They must NOT call it on resources - that are already in the filesystem. - - `tempname` is the current (temporary) name of the file, and `filename` - is the name it will be renamed to by the caller after this routine - returns. 
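# Hedged usage sketch for the ResourceManager-backed helpers above (exposed at
# module level by pkg_resources); 'mypkg' and 'data/config.json' are placeholder
# names for a real package and a resource it actually ships.
import pkg_resources

if pkg_resources.resource_exists('mypkg', 'data/config.json'):
    raw = pkg_resources.resource_string('mypkg', 'data/config.json')      # bytes
    path = pkg_resources.resource_filename('mypkg', 'data/config.json')   # real filesystem path (extracted if zipped)
    with pkg_resources.resource_stream('mypkg', 'data/config.json') as fh:
        header = fh.readline()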
- """ - - if os.name == 'posix': - # Make the resource executable - mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 - os.chmod(tempname, mode) - - def set_extraction_path(self, path): - """Set the base path where resources will be extracted to, if needed. - - If you do not call this routine before any extractions take place, the - path defaults to the return value of ``get_default_cache()``. (Which - is based on the ``PYTHON_EGG_CACHE`` environment variable, with various - platform-specific fallbacks. See that routine's documentation for more - details.) - - Resources are extracted to subdirectories of this path based upon - information given by the ``IResourceProvider``. You may set this to a - temporary directory, but then you must call ``cleanup_resources()`` to - delete the extracted files when done. There is no guarantee that - ``cleanup_resources()`` will be able to remove all extracted files. - - (Note: you may not change the extraction path for a given resource - manager once resources have been extracted, unless you first call - ``cleanup_resources()``.) - """ - if self.cached_files: - raise ValueError("Can't change extraction path, files already extracted") - - self.extraction_path = path - - def cleanup_resources(self, force=False): - """ - Delete all extracted resource files and directories, returning a list - of the file and directory names that could not be successfully removed. - This function does not have any concurrency protection, so it should - generally only be called when the extraction path is a temporary - directory exclusive to a single process. This method is not - automatically called; you must call it explicitly or register it as an - ``atexit`` function if you wish to ensure cleanup of a temporary - directory used for extractions. - """ - # XXX - - -def get_default_cache(): - """ - Return the ``PYTHON_EGG_CACHE`` environment variable - or a platform-relevant user cache dir for an app - named "Python-Eggs". - """ - return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir( - appname='Python-Eggs' - ) - - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
- """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """ - Convert an arbitrary string to a standard version string - """ - try: - # normalize the version - return str(packaging.version.Version(version)) - except packaging.version.InvalidVersion: - version = version.replace(' ', '.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def _forgiving_version(version): - """Fallback when ``safe_version`` is not safe enough - >>> parse_version(_forgiving_version('0.23ubuntu1')) - - >>> parse_version(_forgiving_version('0.23-')) - - >>> parse_version(_forgiving_version('0.-_')) - - >>> parse_version(_forgiving_version('42.+?1')) - - >>> parse_version(_forgiving_version('hello world')) - - """ - version = version.replace(' ', '.') - match = _PEP440_FALLBACK.search(version) - if match: - safe = match["safe"] - rest = version[len(safe):] - else: - safe = "0" - rest = version - local = f"sanitized.{_safe_segment(rest)}".strip(".") - return f"{safe}.dev0+{local}" - - -def _safe_segment(segment): - """Convert an arbitrary string into a safe segment""" - segment = re.sub('[^A-Za-z0-9.]+', '-', segment) - segment = re.sub('-[^A-Za-z0-9]+', '-', segment) - return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-") - - -def safe_extra(extra): - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - """ - return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. - """ - return name.replace('-', '_') - - -def invalid_marker(text): - """ - Validate text as a PEP 508 environment marker; return an exception - if invalid or False otherwise. - """ - try: - evaluate_marker(text) - except SyntaxError as e: - e.filename = None - e.lineno = None - return e - return False - - -def evaluate_marker(text, extra=None): - """ - Evaluate a PEP 508 environment marker. - Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. - - This implementation uses the 'pyparsing' module. 
- """ - try: - marker = packaging.markers.Marker(text) - return marker.evaluate() - except packaging.markers.InvalidMarker as e: - raise SyntaxError(e) from e - - -class NullProvider: - """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - - egg_name = None - egg_info = None - loader = None - - def __init__(self, module): - self.loader = getattr(module, '__loader__', None) - self.module_path = os.path.dirname(getattr(module, '__file__', '')) - - def get_resource_filename(self, manager, resource_name): - return self._fn(self.module_path, resource_name) - - def get_resource_stream(self, manager, resource_name): - return io.BytesIO(self.get_resource_string(manager, resource_name)) - - def get_resource_string(self, manager, resource_name): - return self._get(self._fn(self.module_path, resource_name)) - - def has_resource(self, resource_name): - return self._has(self._fn(self.module_path, resource_name)) - - def _get_metadata_path(self, name): - return self._fn(self.egg_info, name) - - def has_metadata(self, name): - if not self.egg_info: - return self.egg_info - - path = self._get_metadata_path(name) - return self._has(path) - - def get_metadata(self, name): - if not self.egg_info: - return "" - path = self._get_metadata_path(name) - value = self._get(path) - try: - return value.decode('utf-8') - except UnicodeDecodeError as exc: - # Include the path in the error message to simplify - # troubleshooting, and without changing the exception type. - exc.reason += ' in {} file at path: {}'.format(name, path) - raise - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - def resource_isdir(self, resource_name): - return self._isdir(self._fn(self.module_path, resource_name)) - - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) - - def resource_listdir(self, resource_name): - return self._listdir(self._fn(self.module_path, resource_name)) - - def metadata_listdir(self, name): - if self.egg_info: - return self._listdir(self._fn(self.egg_info, name)) - return [] - - def run_script(self, script_name, namespace): - script = 'scripts/' + script_name - if not self.has_metadata(script): - raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}".format( - **locals() - ), - ) - script_text = self.get_metadata(script).replace('\r\n', '\n') - script_text = script_text.replace('\r', '\n') - script_filename = self._fn(self.egg_info, script) - namespace['__file__'] = script_filename - if os.path.exists(script_filename): - with open(script_filename) as fid: - source = fid.read() - code = compile(source, script_filename, 'exec') - exec(code, namespace, namespace) - else: - from linecache import cache - - cache[script_filename] = ( - len(script_text), - 0, - script_text.split('\n'), - script_filename, - ) - script_code = compile(script_text, script_filename, 'exec') - exec(script_code, namespace, namespace) - - def _has(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _isdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _listdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _fn(self, base, resource_name): - self._validate_resource_path(resource_name) - if resource_name: - return os.path.join(base, *resource_name.split('/')) - return base - - @staticmethod - def 
_validate_resource_path(path): - """ - Validate the resource paths according to the docs. - https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access - - >>> warned = getfixture('recwarn') - >>> warnings.simplefilter('always') - >>> vrp = NullProvider._validate_resource_path - >>> vrp('foo/bar.txt') - >>> bool(warned) - False - >>> vrp('../foo/bar.txt') - >>> bool(warned) - True - >>> warned.clear() - >>> vrp('/foo/bar.txt') - >>> bool(warned) - True - >>> vrp('foo/../../bar.txt') - >>> bool(warned) - True - >>> warned.clear() - >>> vrp('foo/f../bar.txt') - >>> bool(warned) - False - - Windows path separators are straight-up disallowed. - >>> vrp(r'\\foo/bar.txt') - Traceback (most recent call last): - ... - ValueError: Use of .. or absolute path in a resource path \ -is not allowed. - - >>> vrp(r'C:\\foo/bar.txt') - Traceback (most recent call last): - ... - ValueError: Use of .. or absolute path in a resource path \ -is not allowed. - - Blank values are allowed - - >>> vrp('') - >>> bool(warned) - False - - Non-string values are not. - - >>> vrp(None) - Traceback (most recent call last): - ... - AttributeError: ... - """ - invalid = ( - os.path.pardir in path.split(posixpath.sep) - or posixpath.isabs(path) - or ntpath.isabs(path) - ) - if not invalid: - return - - msg = "Use of .. or absolute path in a resource path is not allowed." - - # Aggressively disallow Windows absolute paths - if ntpath.isabs(path) and not posixpath.isabs(path): - raise ValueError(msg) - - # for compatibility, warn; in future - # raise ValueError(msg) - warnings.warn( - msg[:-1] + " and will raise exceptions in a future release.", - DeprecationWarning, - stacklevel=4, - ) - - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) - raise NotImplementedError( - "Can't perform this operation for loaders without 'get_data()'" - ) - - -register_loader_type(object, NullProvider) - - -def _parents(path): - """ - yield all parents of path including path - """ - last = None - while path != last: - yield path - last = path - path, _ = os.path.split(path) - - -class EggProvider(NullProvider): - """Provider based on a virtual filesystem""" - - def __init__(self, module): - super().__init__(module) - self._setup_prefix() - - def _setup_prefix(self): - # Assume that metadata may be nested inside a "basket" - # of multiple eggs and use module_path instead of .archive. 
- eggs = filter(_is_egg_path, _parents(self.module_path)) - egg = next(eggs, None) - egg and self._set_egg(egg) - - def _set_egg(self, path): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 'EGG-INFO') - self.egg_root = path - - -class DefaultProvider(EggProvider): - """Provides access to package resources in the filesystem""" - - def _has(self, path): - return os.path.exists(path) - - def _isdir(self, path): - return os.path.isdir(path) - - def _listdir(self, path): - return os.listdir(path) - - def get_resource_stream(self, manager, resource_name): - return open(self._fn(self.module_path, resource_name), 'rb') - - def _get(self, path): - with open(path, 'rb') as stream: - return stream.read() - - @classmethod - def _register(cls): - loader_names = ( - 'SourceFileLoader', - 'SourcelessFileLoader', - ) - for name in loader_names: - loader_cls = getattr(importlib_machinery, name, type(None)) - register_loader_type(loader_cls, cls) - - -DefaultProvider._register() - - -class EmptyProvider(NullProvider): - """Provider that returns nothing for all requests""" - - module_path = None - - _isdir = _has = lambda self, path: False - - def _get(self, path): - return '' - - def _listdir(self, path): - return [] - - def __init__(self): - pass - - -empty_provider = EmptyProvider() - - -class ZipManifests(dict): - """ - zip manifest builder - """ - - @classmethod - def build(cls, path): - """ - Build a dictionary similar to the zipimport directory - caches, except instead of tuples, store ZipInfo objects. - - Use a platform-specific path separator (os.sep) for the path keys - for compatibility with pypy on Windows. - """ - with zipfile.ZipFile(path) as zfile: - items = ( - ( - name.replace('/', os.sep), - zfile.getinfo(name), - ) - for name in zfile.namelist() - ) - return dict(items) - - load = build - - -class MemoizedZipManifests(ZipManifests): - """ - Memoized zipfile manifests. - """ - - manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') - - def load(self, path): - """ - Load a manifest at path or return a suitable manifest already loaded. - """ - path = os.path.normpath(path) - mtime = os.stat(path).st_mtime - - if path not in self or self[path].mtime != mtime: - manifest = self.build(path) - self[path] = self.manifest_mod(manifest, mtime) - - return self[path].manifest - - -class ZipProvider(EggProvider): - """Resource support for zips and eggs""" - - eagers = None - _zip_manifests = MemoizedZipManifests() - - def __init__(self, module): - super().__init__(module) - self.zip_pre = self.loader.archive + os.sep - - def _zipinfo_name(self, fspath): - # Convert a virtual filename (full path to file) into a zipfile subpath - # usable with the zipimport directory cache for our target archive - fspath = fspath.rstrip(os.sep) - if fspath == self.loader.archive: - return '' - if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre) :] - raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre)) - - def _parts(self, zip_path): - # Convert a zipfile subpath into an egg-relative path part list. 
- # pseudo-fs path - fspath = self.zip_pre + zip_path - if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1 :].split(os.sep) - raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root)) - - @property - def zipinfo(self): - return self._zip_manifests.load(self.loader.archive) - - def get_resource_filename(self, manager, resource_name): - if not self.egg_name: - raise NotImplementedError( - "resource_filename() only supported for .egg, not .zip" - ) - # no need to lock for extraction, since we use temp names - zip_path = self._resource_to_zip(resource_name) - eagers = self._get_eager_resources() - if '/'.join(self._parts(zip_path)) in eagers: - for name in eagers: - self._extract_resource(manager, self._eager_to_zip(name)) - return self._extract_resource(manager, zip_path) - - @staticmethod - def _get_date_and_size(zip_stat): - size = zip_stat.file_size - # ymdhms+wday, yday, dst - date_time = zip_stat.date_time + (0, 0, -1) - # 1980 offset already done - timestamp = time.mktime(date_time) - return timestamp, size - - # FIXME: 'ZipProvider._extract_resource' is too complex (12) - def _extract_resource(self, manager, zip_path): # noqa: C901 - if zip_path in self._index(): - for name in self._index()[zip_path]: - last = self._extract_resource(manager, os.path.join(zip_path, name)) - # return the extracted directory name - return os.path.dirname(last) - - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - - if not WRITE_SUPPORT: - raise IOError( - '"os.rename" and "os.unlink" are not supported ' 'on this platform' - ) - try: - real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path)) - - if self._is_current(real_path, zip_path): - return real_path - - outf, tmpnam = _mkstemp( - ".$extract", - dir=os.path.dirname(real_path), - ) - os.write(outf, self.loader.get_data(zip_path)) - os.close(outf) - utime(tmpnam, (timestamp, timestamp)) - manager.postprocess(tmpnam, real_path) - - try: - rename(tmpnam, real_path) - - except os.error: - if os.path.isfile(real_path): - if self._is_current(real_path, zip_path): - # the file became current since it was checked above, - # so proceed. 
- return real_path - # Windows, del old file and retry - elif os.name == 'nt': - unlink(real_path) - rename(tmpnam, real_path) - return real_path - raise - - except os.error: - # report a user-friendly error - manager.extraction_error() - - return real_path - - def _is_current(self, file_path, zip_path): - """ - Return True if the file_path is current for this zip_path - """ - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - if not os.path.isfile(file_path): - return False - stat = os.stat(file_path) - if stat.st_size != size or stat.st_mtime != timestamp: - return False - # check that the contents match - zip_contents = self.loader.get_data(zip_path) - with open(file_path, 'rb') as f: - file_contents = f.read() - return zip_contents == file_contents - - def _get_eager_resources(self): - if self.eagers is None: - eagers = [] - for name in ('native_libs.txt', 'eager_resources.txt'): - if self.has_metadata(name): - eagers.extend(self.get_metadata_lines(name)) - self.eagers = eagers - return self.eagers - - def _index(self): - try: - return self._dirindex - except AttributeError: - ind = {} - for path in self.zipinfo: - parts = path.split(os.sep) - while parts: - parent = os.sep.join(parts[:-1]) - if parent in ind: - ind[parent].append(parts[-1]) - break - else: - ind[parent] = [parts.pop()] - self._dirindex = ind - return ind - - def _has(self, fspath): - zip_path = self._zipinfo_name(fspath) - return zip_path in self.zipinfo or zip_path in self._index() - - def _isdir(self, fspath): - return self._zipinfo_name(fspath) in self._index() - - def _listdir(self, fspath): - return list(self._index().get(self._zipinfo_name(fspath), ())) - - def _eager_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.egg_root, resource_name)) - - def _resource_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.module_path, resource_name)) - - -register_loader_type(zipimport.zipimporter, ZipProvider) - - -class FileMetadata(EmptyProvider): - """Metadata handler for standalone PKG-INFO files - - Usage:: - - metadata = FileMetadata("/path/to/PKG-INFO") - - This provider rejects all data and metadata requests except for PKG-INFO, - which is treated as existing, and will be the contents of the file at - the provided location. 
- """ - - def __init__(self, path): - self.path = path - - def _get_metadata_path(self, name): - return self.path - - def has_metadata(self, name): - return name == 'PKG-INFO' and os.path.isfile(self.path) - - def get_metadata(self, name): - if name != 'PKG-INFO': - raise KeyError("No metadata except PKG-INFO is available") - - with io.open(self.path, encoding='utf-8', errors="replace") as f: - metadata = f.read() - self._warn_on_replacement(metadata) - return metadata - - def _warn_on_replacement(self, metadata): - replacement_char = '�' - if replacement_char in metadata: - tmpl = "{self.path} could not be properly decoded in UTF-8" - msg = tmpl.format(**locals()) - warnings.warn(msg) - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - -class PathMetadata(DefaultProvider): - """Metadata provider for egg directories - - Usage:: - - # Development eggs: - - egg_info = "/path/to/PackageName.egg-info" - base_dir = os.path.dirname(egg_info) - metadata = PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - dist = Distribution(basedir, project_name=dist_name, metadata=metadata) - - # Unpacked egg directories: - - egg_path = "/path/to/PackageName-ver-pyver-etc.egg" - metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) - dist = Distribution.from_filename(egg_path, metadata=metadata) - """ - - def __init__(self, path, egg_info): - self.module_path = path - self.egg_info = egg_info - - -class EggMetadata(ZipProvider): - """Metadata provider for .egg files""" - - def __init__(self, importer): - """Create a metadata provider from a zipimporter""" - - self.zip_pre = importer.archive + os.sep - self.loader = importer - if importer.prefix: - self.module_path = os.path.join(importer.archive, importer.prefix) - else: - self.module_path = importer.archive - self._setup_prefix() - - -_declare_state('dict', _distribution_finders={}) - - -def register_finder(importer_type, distribution_finder): - """Register `distribution_finder` to find distributions in sys.path items - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `distribution_finder` is a callable that, passed a path - item and the importer instance, yields ``Distribution`` instances found on - that path item. See ``pkg_resources.find_on_path`` for an example.""" - _distribution_finders[importer_type] = distribution_finder - - -def find_distributions(path_item, only=False): - """Yield distributions accessible via `path_item`""" - importer = get_importer(path_item) - finder = _find_adapter(_distribution_finders, importer) - return finder(importer, path_item, only) - - -def find_eggs_in_zip(importer, path_item, only=False): - """ - Find eggs in zip files; possibly multiple nested eggs. 
- """ - if importer.archive.endswith('.whl'): - # wheels are not supported with this finder - # they don't have PKG-INFO metadata, and won't ever contain eggs - return - metadata = EggMetadata(importer) - if metadata.has_metadata('PKG-INFO'): - yield Distribution.from_filename(path_item, metadata=metadata) - if only: - # don't yield nested distros - return - for subitem in metadata.resource_listdir(''): - if _is_egg_path(subitem): - subpath = os.path.join(path_item, subitem) - dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) - for dist in dists: - yield dist - elif subitem.lower().endswith(('.dist-info', '.egg-info')): - subpath = os.path.join(path_item, subitem) - submeta = EggMetadata(zipimport.zipimporter(subpath)) - submeta.egg_info = subpath - yield Distribution.from_location(path_item, subitem, submeta) - - -register_finder(zipimport.zipimporter, find_eggs_in_zip) - - -def find_nothing(importer, path_item, only=False): - return () - - -register_finder(object, find_nothing) - - -def find_on_path(importer, path_item, only=False): - """Yield distributions accessible on a sys.path directory""" - path_item = _normalize_cached(path_item) - - if _is_unpacked_egg(path_item): - yield Distribution.from_filename( - path_item, - metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')), - ) - return - - entries = (os.path.join(path_item, child) for child in safe_listdir(path_item)) - - # scan for .egg and .egg-info in directory - for entry in sorted(entries): - fullpath = os.path.join(path_item, entry) - factory = dist_factory(path_item, entry, only) - for dist in factory(fullpath): - yield dist - - -def dist_factory(path_item, entry, only): - """Return a dist_factory for the given entry.""" - lower = entry.lower() - is_egg_info = lower.endswith('.egg-info') - is_dist_info = lower.endswith('.dist-info') and os.path.isdir( - os.path.join(path_item, entry) - ) - is_meta = is_egg_info or is_dist_info - return ( - distributions_from_metadata - if is_meta - else find_distributions - if not only and _is_egg_path(entry) - else resolve_egg_link - if not only and lower.endswith('.egg-link') - else NoDists() - ) - - -class NoDists: - """ - >>> bool(NoDists()) - False - - >>> list(NoDists()('anything')) - [] - """ - - def __bool__(self): - return False - - def __call__(self, fullpath): - return iter(()) - - -def safe_listdir(path): - """ - Attempt to list contents of path, but suppress some exceptions. - """ - try: - return os.listdir(path) - except (PermissionError, NotADirectoryError): - pass - except OSError as e: - # Ignore the directory if does not exist, not a directory or - # permission denied - if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT): - raise - return () - - -def distributions_from_metadata(path): - root = os.path.dirname(path) - if os.path.isdir(path): - if len(os.listdir(path)) == 0: - # empty metadata dir; skip - return - metadata = PathMetadata(root, path) - else: - metadata = FileMetadata(path) - entry = os.path.basename(path) - yield Distribution.from_location( - root, - entry, - metadata, - precedence=DEVELOP_DIST, - ) - - -def non_empty_lines(path): - """ - Yield non-empty lines from file at path - """ - with open(path) as f: - for line in f: - line = line.strip() - if line: - yield line - - -def resolve_egg_link(path): - """ - Given a path to an .egg-link, resolve distributions - present in the referenced path. 
- """ - referenced_paths = non_empty_lines(path) - resolved_paths = ( - os.path.join(os.path.dirname(path), ref) for ref in referenced_paths - ) - dist_groups = map(find_distributions, resolved_paths) - return next(dist_groups, ()) - - -if hasattr(pkgutil, 'ImpImporter'): - register_finder(pkgutil.ImpImporter, find_on_path) - -register_finder(importlib_machinery.FileFinder, find_on_path) - -_declare_state('dict', _namespace_handlers={}) -_declare_state('dict', _namespace_packages={}) - - -def register_namespace_handler(importer_type, namespace_handler): - """Register `namespace_handler` to declare namespace packages - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `namespace_handler` is a callable like this:: - - def namespace_handler(importer, path_entry, moduleName, module): - # return a path_entry to use for child packages - - Namespace handlers are only called if the importer object has already - agreed that it can handle the relevant path item, and they should only - return a subpath if the module __path__ does not already contain an - equivalent subpath. For an example namespace handler, see - ``pkg_resources.file_ns_handler``. - """ - _namespace_handlers[importer_type] = namespace_handler - - -def _handle_ns(packageName, path_item): - """Ensure that named package includes a subpath of path_item (if needed)""" - - importer = get_importer(path_item) - if importer is None: - return None - - # use find_spec (PEP 451) and fall-back to find_module (PEP 302) - try: - spec = importer.find_spec(packageName) - except AttributeError: - # capture warnings due to #1111 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - loader = importer.find_module(packageName) - else: - loader = spec.loader if spec else None - - if loader is None: - return None - module = sys.modules.get(packageName) - if module is None: - module = sys.modules[packageName] = types.ModuleType(packageName) - module.__path__ = [] - _set_parent_ns(packageName) - elif not hasattr(module, '__path__'): - raise TypeError("Not a package:", packageName) - handler = _find_adapter(_namespace_handlers, importer) - subpath = handler(importer, path_item, packageName, module) - if subpath is not None: - path = module.__path__ - path.append(subpath) - importlib.import_module(packageName) - _rebuild_mod_path(path, packageName, module) - return subpath - - -def _rebuild_mod_path(orig_path, package_name, module): - """ - Rebuild module.__path__ ensuring that all entries are ordered - corresponding to their sys.path order - """ - sys_path = [_normalize_cached(p) for p in sys.path] - - def safe_sys_path_index(entry): - """ - Workaround for #520 and #513. 
- """ - try: - return sys_path.index(entry) - except ValueError: - return float('inf') - - def position_in_sys_path(path): - """ - Return the ordinal of the path based on its position in sys.path - """ - path_parts = path.split(os.sep) - module_parts = package_name.count('.') + 1 - parts = path_parts[:-module_parts] - return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - - new_path = sorted(orig_path, key=position_in_sys_path) - new_path = [_normalize_cached(p) for p in new_path] - - if isinstance(module.__path__, list): - module.__path__[:] = new_path - else: - module.__path__ = new_path - - -def declare_namespace(packageName): - """Declare that package 'packageName' is a namespace package""" - - msg = ( - f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n" - "Implementing implicit namespace packages (as specified in PEP 420) " - "is preferred to `pkg_resources.declare_namespace`. " - "See https://setuptools.pypa.io/en/latest/references/" - "keywords.html#keyword-namespace-packages" - ) - warnings.warn(msg, DeprecationWarning, stacklevel=2) - - _imp.acquire_lock() - try: - if packageName in _namespace_packages: - return - - path = sys.path - parent, _, _ = packageName.rpartition('.') - - if parent: - declare_namespace(parent) - if parent not in _namespace_packages: - __import__(parent) - try: - path = sys.modules[parent].__path__ - except AttributeError as e: - raise TypeError("Not a package:", parent) from e - - # Track what packages are namespaces, so when new path items are added, - # they can be updated - _namespace_packages.setdefault(parent or None, []).append(packageName) - _namespace_packages.setdefault(packageName, []) - - for path_item in path: - # Ensure all the parent's path items are reflected in the child, - # if they apply - _handle_ns(packageName, path_item) - - finally: - _imp.release_lock() - - -def fixup_namespace_packages(path_item, parent=None): - """Ensure that previously-declared namespace packages include path_item""" - _imp.acquire_lock() - try: - for package in _namespace_packages.get(parent, ()): - subpath = _handle_ns(package, path_item) - if subpath: - fixup_namespace_packages(subpath, package) - finally: - _imp.release_lock() - - -def file_ns_handler(importer, path_item, packageName, module): - """Compute an ns-package subpath for a filesystem or zipfile importer""" - - subpath = os.path.join(path_item, packageName.split('.')[-1]) - normalized = _normalize_cached(subpath) - for item in module.__path__: - if _normalize_cached(item) == normalized: - break - else: - # Only return the path if it's not already there - return subpath - - -if hasattr(pkgutil, 'ImpImporter'): - register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) - -register_namespace_handler(zipimport.zipimporter, file_ns_handler) -register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) - - -def null_ns_handler(importer, path_item, packageName, module): - return None - - -register_namespace_handler(object, null_ns_handler) - - -def normalize_path(filename): - """Normalize a file/dir name for comparison purposes""" - return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) - - -def _cygwin_patch(filename): # pragma: nocover - """ - Contrary to POSIX 2008, on Cygwin, getcwd (3) contains - symlink components. Using - os.path.abspath() works around this limitation. A fix in os.getcwd() - would probably better, in Cygwin even more so, except - that this seems to be by design... 
- """ - return os.path.abspath(filename) if sys.platform == 'cygwin' else filename - - -def _normalize_cached(filename, _cache={}): - try: - return _cache[filename] - except KeyError: - _cache[filename] = result = normalize_path(filename) - return result - - -def _is_egg_path(path): - """ - Determine if given path appears to be an egg. - """ - return _is_zip_egg(path) or _is_unpacked_egg(path) - - -def _is_zip_egg(path): - return ( - path.lower().endswith('.egg') - and os.path.isfile(path) - and zipfile.is_zipfile(path) - ) - - -def _is_unpacked_egg(path): - """ - Determine if given path appears to be an unpacked egg. - """ - return path.lower().endswith('.egg') and os.path.isfile( - os.path.join(path, 'EGG-INFO', 'PKG-INFO') - ) - - -def _set_parent_ns(packageName): - parts = packageName.split('.') - name = parts.pop() - if parts: - parent = '.'.join(parts) - setattr(sys.modules[parent], name, sys.modules[packageName]) - - -MODULE = re.compile(r"\w+(\.\w+)*$").match -EGG_NAME = re.compile( - r""" - (?P[^-]+) ( - -(?P[^-]+) ( - -py(?P[^-]+) ( - -(?P.+) - )? - )? - )? - """, - re.VERBOSE | re.IGNORECASE, -).match - - -class EntryPoint: - """Object representing an advertised importable object""" - - def __init__(self, name, module_name, attrs=(), extras=(), dist=None): - if not MODULE(module_name): - raise ValueError("Invalid module name", module_name) - self.name = name - self.module_name = module_name - self.attrs = tuple(attrs) - self.extras = tuple(extras) - self.dist = dist - - def __str__(self): - s = "%s = %s" % (self.name, self.module_name) - if self.attrs: - s += ':' + '.'.join(self.attrs) - if self.extras: - s += ' [%s]' % ','.join(self.extras) - return s - - def __repr__(self): - return "EntryPoint.parse(%r)" % str(self) - - def load(self, require=True, *args, **kwargs): - """ - Require packages for this EntryPoint, then resolve it. - """ - if not require or args or kwargs: - warnings.warn( - "Parameters to load are deprecated. Call .resolve and " - ".require separately.", - PkgResourcesDeprecationWarning, - stacklevel=2, - ) - if require: - self.require(*args, **kwargs) - return self.resolve() - - def resolve(self): - """ - Resolve the entry point from its module and attrs. - """ - module = __import__(self.module_name, fromlist=['__name__'], level=0) - try: - return functools.reduce(getattr, self.attrs, module) - except AttributeError as exc: - raise ImportError(str(exc)) from exc - - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) - - # Get the requirements for this entry point with all its extras and - # then resolve them. We have to pass `extras` along when resolving so - # that the working set knows what extras we want. Otherwise, for - # dist-info distributions, the working set will assume that the - # requirements for that extra are purely optional and skip over them. 
-        reqs = self.dist.requires(self.extras)
-        items = working_set.resolve(reqs, env, installer, extras=self.extras)
-        list(map(working_set.add, items))
-
-    pattern = re.compile(
-        r'\s*'
-        r'(?P<name>.+?)\s*'
-        r'=\s*'
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+))?\s*'
-        r'(?P<extras>\[.*\])?\s*$'
-    )
-
-    @classmethod
-    def parse(cls, src, dist=None):
-        """Parse a single entry point from string `src`
-
-        Entry point syntax follows the form::
-
-            name = some.module:some.attr [extra1, extra2]
-
-        The entry name and module name are required, but the ``:attrs`` and
-        ``[extras]`` parts are optional
-        """
-        m = cls.pattern.match(src)
-        if not m:
-            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
-            raise ValueError(msg, src)
-        res = m.groupdict()
-        extras = cls._parse_extras(res['extras'])
-        attrs = res['attr'].split('.') if res['attr'] else ()
-        return cls(res['name'], res['module'], attrs, extras, dist)
-
-    @classmethod
-    def _parse_extras(cls, extras_spec):
-        if not extras_spec:
-            return ()
-        req = Requirement.parse('x' + extras_spec)
-        if req.specs:
-            raise ValueError()
-        return req.extras
-
-    @classmethod
-    def parse_group(cls, group, lines, dist=None):
-        """Parse an entry point group"""
-        if not MODULE(group):
-            raise ValueError("Invalid group name", group)
-        this = {}
-        for line in yield_lines(lines):
-            ep = cls.parse(line, dist)
-            if ep.name in this:
-                raise ValueError("Duplicate entry point", group, ep.name)
-            this[ep.name] = ep
-        return this
-
-    @classmethod
-    def parse_map(cls, data, dist=None):
-        """Parse a map of entry point groups"""
-        if isinstance(data, dict):
-            data = data.items()
-        else:
-            data = split_sections(data)
-        maps = {}
-        for group, lines in data:
-            if group is None:
-                if not lines:
-                    continue
-                raise ValueError("Entry points must be listed in groups")
-            group = group.strip()
-            if group in maps:
-                raise ValueError("Duplicate group name", group)
-            maps[group] = cls.parse_group(group, lines, dist)
-        return maps
-
-
-def _version_from_file(lines):
-    """
-    Given an iterable of lines from a Metadata file, return
-    the value of the Version field, if present, or None otherwise.
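# Hedged sketch of the entry-point syntax handled by EntryPoint.parse above;
# 'mytool', 'mypkg.cli' and 'main' are placeholder names.
import pkg_resources

ep = pkg_resources.EntryPoint.parse('mytool = mypkg.cli:main [extra1]')
print(ep.name, ep.module_name, ep.attrs, ep.extras)
# -> mytool mypkg.cli ('main',) ('extra1',)
# ep.resolve() would import mypkg.cli and return its `main` attribute.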
- """ - - def is_version_line(line): - return line.lower().startswith('version:') - - version_lines = filter(is_version_line, lines) - line = next(iter(version_lines), '') - _, _, value = line.partition(':') - return safe_version(value.strip()) or None - - -class Distribution: - """Wrap an actual or potential sys.path entry w/metadata""" - - PKG_INFO = 'PKG-INFO' - - def __init__( - self, - location=None, - metadata=None, - project_name=None, - version=None, - py_version=PY_MAJOR, - platform=None, - precedence=EGG_DIST, - ): - self.project_name = safe_name(project_name or 'Unknown') - if version is not None: - self._version = safe_version(version) - self.py_version = py_version - self.platform = platform - self.location = location - self.precedence = precedence - self._provider = metadata or empty_provider - - @classmethod - def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None] * 4 - basename, ext = os.path.splitext(basename) - if ext.lower() in _distributionImpl: - cls = _distributionImpl[ext.lower()] - - match = EGG_NAME(basename) - if match: - project_name, version, py_version, platform = match.group( - 'name', 'ver', 'pyver', 'plat' - ) - return cls( - location, - metadata, - project_name=project_name, - version=version, - py_version=py_version, - platform=platform, - **kw, - )._reload_version() - - def _reload_version(self): - return self - - @property - def hashcmp(self): - return ( - self._forgiving_parsed_version, - self.precedence, - self.key, - self.location, - self.py_version or '', - self.platform or '', - ) - - def __hash__(self): - return hash(self.hashcmp) - - def __lt__(self, other): - return self.hashcmp < other.hashcmp - - def __le__(self, other): - return self.hashcmp <= other.hashcmp - - def __gt__(self, other): - return self.hashcmp > other.hashcmp - - def __ge__(self, other): - return self.hashcmp >= other.hashcmp - - def __eq__(self, other): - if not isinstance(other, self.__class__): - # It's not a Distribution, so they are not equal - return False - return self.hashcmp == other.hashcmp - - def __ne__(self, other): - return not self == other - - # These properties have to be lazy so that we don't have to load any - # metadata until/unless it's actually needed. (i.e., some distributions - # may not know their name or version without loading PKG-INFO) - - @property - def key(self): - try: - return self._key - except AttributeError: - self._key = key = self.project_name.lower() - return key - - @property - def parsed_version(self): - if not hasattr(self, "_parsed_version"): - try: - self._parsed_version = parse_version(self.version) - except packaging.version.InvalidVersion as ex: - info = f"(package: {self.project_name})" - if hasattr(ex, "add_note"): - ex.add_note(info) # PEP 678 - raise - raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None - - return self._parsed_version - - @property - def _forgiving_parsed_version(self): - try: - return self.parsed_version - except packaging.version.InvalidVersion as ex: - self._parsed_version = parse_version(_forgiving_version(self.version)) - - notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678 - msg = f"""!!\n\n - ************************************************************************* - {str(ex)}\n{notes} - - This is a long overdue deprecation. - For the time being, `pkg_resources` will use `{self._parsed_version}` - as a replacement to avoid breaking existing environments, - but no future compatibility is guaranteed. 
- - If you maintain package {self.project_name} you should implement - the relevant changes to adequate the project to PEP 440 immediately. - ************************************************************************* - \n\n!! - """ - warnings.warn(msg, DeprecationWarning) - - return self._parsed_version - - @property - def version(self): - try: - return self._version - except AttributeError as e: - version = self._get_version() - if version is None: - path = self._get_metadata_path_for_display(self.PKG_INFO) - msg = ("Missing 'Version:' header and/or {} file at path: {}").format( - self.PKG_INFO, path - ) - raise ValueError(msg, self) from e - - return version - - @property - def _dep_map(self): - """ - A map of extra to its list of (direct) requirements - for this distribution, including the null extra. - """ - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._filter_extras(self._build_dep_map()) - return self.__dep_map - - @staticmethod - def _filter_extras(dm): - """ - Given a mapping of extras to dependencies, strip off - environment markers and filter out any dependencies - not matching the markers. - """ - for extra in list(filter(None, dm)): - new_extra = extra - reqs = dm.pop(extra) - new_extra, _, marker = extra.partition(':') - fails_marker = marker and ( - invalid_marker(marker) or not evaluate_marker(marker) - ) - if fails_marker: - reqs = [] - new_extra = safe_extra(new_extra) or None - - dm.setdefault(new_extra, []).extend(reqs) - return dm - - def _build_dep_map(self): - dm = {} - for name in 'requires.txt', 'depends.txt': - for extra, reqs in split_sections(self._get_metadata(name)): - dm.setdefault(extra, []).extend(parse_requirements(reqs)) - return dm - - def requires(self, extras=()): - """List of Requirements needed for this distro if `extras` are used""" - dm = self._dep_map - deps = [] - deps.extend(dm.get(None, ())) - for ext in extras: - try: - deps.extend(dm[safe_extra(ext)]) - except KeyError as e: - raise UnknownExtra( - "%s has no such extra feature %r" % (self, ext) - ) from e - return deps - - def _get_metadata_path_for_display(self, name): - """ - Return the path to the given metadata file, if available. - """ - try: - # We need to access _get_metadata_path() on the provider object - # directly rather than through this class's __getattr__() - # since _get_metadata_path() is marked private. - path = self._provider._get_metadata_path(name) - - # Handle exceptions e.g. in case the distribution's metadata - # provider doesn't support _get_metadata_path(). 
- except Exception: - return '[could not detect]' - - return path - - def _get_metadata(self, name): - if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line - - def _get_version(self): - lines = self._get_metadata(self.PKG_INFO) - version = _version_from_file(lines) - - return version - - def activate(self, path=None, replace=False): - """Ensure distribution is importable on `path` (default=sys.path)""" - if path is None: - path = sys.path - self.insert_on(path, replace=replace) - if path is sys.path: - fixup_namespace_packages(self.location) - for pkg in self._get_metadata('namespace_packages.txt'): - if pkg in sys.modules: - declare_namespace(pkg) - - def egg_name(self): - """Return what this distribution's standard .egg filename should be""" - filename = "%s-%s-py%s" % ( - to_filename(self.project_name), - to_filename(self.version), - self.py_version or PY_MAJOR, - ) - - if self.platform: - filename += '-' + self.platform - return filename - - def __repr__(self): - if self.location: - return "%s (%s)" % (self, self.location) - else: - return str(self) - - def __str__(self): - try: - version = getattr(self, 'version', None) - except ValueError: - version = None - version = version or "[unknown version]" - return "%s %s" % (self.project_name, version) - - def __getattr__(self, attr): - """Delegate all unrecognized public attributes to .metadata provider""" - if attr.startswith('_'): - raise AttributeError(attr) - return getattr(self._provider, attr) - - def __dir__(self): - return list( - set(super(Distribution, self).__dir__()) - | set(attr for attr in self._provider.__dir__() if not attr.startswith('_')) - ) - - @classmethod - def from_filename(cls, filename, metadata=None, **kw): - return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, **kw - ) - - def as_requirement(self): - """Return a ``Requirement`` that matches this distribution exactly""" - if isinstance(self.parsed_version, packaging.version.Version): - spec = "%s==%s" % (self.project_name, self.parsed_version) - else: - spec = "%s===%s" % (self.project_name, self.parsed_version) - - return Requirement.parse(spec) - - def load_entry_point(self, group, name): - """Return the `name` entry point of `group` or raise ImportError""" - ep = self.get_entry_info(group, name) - if ep is None: - raise ImportError("Entry point %r not found" % ((group, name),)) - return ep.load() - - def get_entry_map(self, group=None): - """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( - self._get_metadata('entry_points.txt'), self - ) - if group is not None: - return ep_map.get(group, {}) - return ep_map - - def get_entry_info(self, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return self.get_entry_map(group).get(name) - - # FIXME: 'Distribution.insert_on' is too complex (13) - def insert_on(self, path, loc=None, replace=False): # noqa: C901 - """Ensure self.location is on path - - If replace=False (default): - - If location is already in path anywhere, do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent. - - Else: add to the end of path. - If replace=True: - - If location is already on path anywhere (not eggs) - or higher priority than its parent (eggs) - do nothing. 
- - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent, - removing any lower-priority entries. - - Else: add it to the front of path. - """ - - loc = loc or self.location - if not loc: - return - - nloc = _normalize_cached(loc) - bdir = os.path.dirname(nloc) - npath = [(p and _normalize_cached(p) or p) for p in path] - - for p, item in enumerate(npath): - if item == nloc: - if replace: - break - else: - # don't modify path (even removing duplicates) if - # found and not replace - return - elif item == bdir and self.precedence == EGG_DIST: - # if it's an .egg, give it precedence over its directory - # UNLESS it's already been added to sys.path and replace=False - if (not replace) and nloc in npath[p:]: - return - if path is sys.path: - self.check_version_conflict() - path.insert(p, loc) - npath.insert(p, nloc) - break - else: - if path is sys.path: - self.check_version_conflict() - if replace: - path.insert(0, loc) - else: - path.append(loc) - return - - # p is the spot where we found or inserted loc; now remove duplicates - while True: - try: - np = npath.index(nloc, p + 1) - except ValueError: - break - else: - del npath[np], path[np] - # ha! - p = np - - return - - def check_version_conflict(self): - if self.key == 'setuptools': - # ignore the inevitable setuptools self-conflicts :( - return - - nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) - loc = normalize_path(self.location) - for modname in self._get_metadata('top_level.txt'): - if ( - modname not in sys.modules - or modname in nsp - or modname in _namespace_packages - ): - continue - if modname in ('pkg_resources', 'setuptools', 'site'): - continue - fn = getattr(sys.modules[modname], '__file__', None) - if fn and ( - normalize_path(fn).startswith(loc) or fn.startswith(self.location) - ): - continue - issue_warning( - "Module %s was already imported from %s, but %s is being added" - " to sys.path" % (modname, fn, self.location), - ) - - def has_version(self): - try: - self.version - except ValueError: - issue_warning("Unbuilt egg for " + repr(self)) - return False - except SystemError: - # TODO: remove this except clause when python/cpython#103632 is fixed. - return False - return True - - def clone(self, **kw): - """Copy this distribution, substituting in any changed keyword args""" - names = 'project_name version py_version platform location precedence' - for attr in names.split(): - kw.setdefault(attr, getattr(self, attr, None)) - kw.setdefault('metadata', self._provider) - return self.__class__(**kw) - - @property - def extras(self): - return [dep for dep in self._dep_map if dep] - - -class EggInfoDistribution(Distribution): - def _reload_version(self): - """ - Packages installed by distutils (e.g. numpy or scipy), - which uses an old safe_version, and so - their version numbers can get mangled when - converted to filenames (e.g., 1.11.0.dev0+2329eae to - 1.11.0.dev0_2329eae). These distributions will not be - parsed properly - downstream by Distribution and safe_version, so - take an extra step and try to get the version number from - the metadata file itself instead of the filename. - """ - md_version = self._get_version() - if md_version: - self._version = md_version - return self - - -class DistInfoDistribution(Distribution): - """ - Wrap an actual or potential sys.path entry - w/metadata, .dist-info style. 
- """ - - PKG_INFO = 'METADATA' - EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") - - @property - def _parsed_pkg_info(self): - """Parse and cache metadata""" - try: - return self._pkg_info - except AttributeError: - metadata = self.get_metadata(self.PKG_INFO) - self._pkg_info = email.parser.Parser().parsestr(metadata) - return self._pkg_info - - @property - def _dep_map(self): - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._compute_dependencies() - return self.__dep_map - - def _compute_dependencies(self): - """Recompute this distribution's dependencies.""" - dm = self.__dep_map = {None: []} - - reqs = [] - # Including any condition expressions - for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: - reqs.extend(parse_requirements(req)) - - def reqs_for_extra(extra): - for req in reqs: - if not req.marker or req.marker.evaluate({'extra': extra}): - yield req - - common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None))) - dm[None].extend(common) - - for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: - s_extra = safe_extra(extra.strip()) - dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common] - - return dm - - -_distributionImpl = { - '.egg': Distribution, - '.egg-info': EggInfoDistribution, - '.dist-info': DistInfoDistribution, -} - - -def issue_warning(*args, **kw): - level = 1 - g = globals() - try: - # find the first stack frame that is *not* code in - # the pkg_resources module, to use for the warning - while sys._getframe(level).f_globals is g: - level += 1 - except ValueError: - pass - warnings.warn(stacklevel=level + 1, *args, **kw) - - -def parse_requirements(strs): - """ - Yield ``Requirement`` objects for each specification in `strs`. - - `strs` must be a string, or a (possibly-nested) iterable thereof. - """ - return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) - - -class RequirementParseError(packaging.requirements.InvalidRequirement): - "Compatibility wrapper for InvalidRequirement" - - -class Requirement(packaging.requirements.Requirement): - def __init__(self, requirement_string): - """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - super(Requirement, self).__init__(requirement_string) - self.unsafe_name = self.name - project_name = safe_name(self.name) - self.project_name, self.key = project_name, project_name.lower() - self.specs = [(spec.operator, spec.version) for spec in self.specifier] - self.extras = tuple(map(safe_extra, self.extras)) - self.hashCmp = ( - self.key, - self.url, - self.specifier, - frozenset(self.extras), - str(self.marker) if self.marker else None, - ) - self.__hash = hash(self.hashCmp) - - def __eq__(self, other): - return isinstance(other, Requirement) and self.hashCmp == other.hashCmp - - def __ne__(self, other): - return not self == other - - def __contains__(self, item): - if isinstance(item, Distribution): - if item.key != self.key: - return False - - item = item.version - - # Allow prereleases always in order to match the previous behavior of - # this method. In the future this should be smarter and follow PEP 440 - # more accurately. - return self.specifier.contains(item, prereleases=True) - - def __hash__(self): - return self.__hash - - def __repr__(self): - return "Requirement.parse(%r)" % str(self) - - @staticmethod - def parse(s): - (req,) = parse_requirements(s) - return req - - -def _always_object(classes): - """ - Ensure object appears in the mro even - for old-style classes. 
- """ - if object not in classes: - return classes + (object,) - return classes - - -def _find_adapter(registry, ob): - """Return an adapter factory for `ob` from `registry`""" - types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) - for t in types: - if t in registry: - return registry[t] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - os.makedirs(dirname, exist_ok=True) - - -def _bypass_ensure_directory(path): - """Sandbox-bypassing version of ensure_directory()""" - if not WRITE_SUPPORT: - raise IOError('"os.mkdir" not supported on this platform.') - dirname, filename = split(path) - if dirname and filename and not isdir(dirname): - _bypass_ensure_directory(dirname) - try: - mkdir(dirname, 0o755) - except FileExistsError: - pass - - -def split_sections(s): - """Split a string or iterable thereof into (section, content) pairs - - Each ``section`` is a stripped version of the section header ("[section]") - and each ``content`` is a list of stripped lines excluding blank lines and - comment-only lines. If there are any such lines before the first section - header, they're returned in a first ``section`` of ``None``. - """ - section = None - content = [] - for line in yield_lines(s): - if line.startswith("["): - if line.endswith("]"): - if section or content: - yield section, content - section = line[1:-1].strip() - content = [] - else: - raise ValueError("Invalid section heading", line) - else: - content.append(line) - - # wrap up last segment - yield section, content - - -def _mkstemp(*args, **kw): - old_open = os.open - try: - # temporarily bypass sandboxing - os.open = os_open - return tempfile.mkstemp(*args, **kw) - finally: - # and then put it back - os.open = old_open - - -# Silence the PEP440Warning by default, so that end users don't get hit by it -# randomly just because they use pkg_resources. We want to append the rule -# because we want earlier uses of filterwarnings to take precedence over this -# one. -warnings.filterwarnings("ignore", category=PEP440Warning, append=True) - - -# from jaraco.functools 1.3 -def _call_aside(f, *args, **kwargs): - f(*args, **kwargs) - return f - - -@_call_aside -def _initialize(g=globals()): - "Set up global resource manager (deliberately not state-saved)" - manager = ResourceManager() - g['_manager'] = manager - g.update( - (name, getattr(manager, name)) - for name in dir(manager) - if not name.startswith('_') - ) - - -class PkgResourcesDeprecationWarning(Warning): - """ - Base class for warning about deprecations in ``pkg_resources`` - - This class is not derived from ``DeprecationWarning``, and as such is - visible by default. - """ - - -@_call_aside -def _initialize_master_working_set(): - """ - Prepare the master working set and make the ``require()`` - API available. - - This function has explicit effects on the global state - of pkg_resources. It is intended to be invoked once at - the initialization of this module. - - Invocation by other packages is unsupported and done - at their own risk. 
- """ - working_set = WorkingSet._build_master() - _declare_state('object', working_set=working_set) - - require = working_set.require - iter_entry_points = working_set.iter_entry_points - add_activation_listener = working_set.subscribe - run_script = working_set.run_script - # backward compatibility - run_main = run_script - # Activate all distributions already on sys.path with replace=False and - # ensure that all distributions added to the working set in the future - # (e.g. by calling ``require()``) will get activated as well, - # with higher priority (replace=True). - tuple(dist.activate(replace=False) for dist in working_set) - add_activation_listener( - lambda dist: dist.activate(replace=True), - existing=False, - ) - working_set.entries = [] - # match order - list(map(working_set.add_entry, sys.path)) - globals().update(locals()) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py deleted file mode 100644 index 819bf5d3c2454c0a1853cfb695ed904686e1deb1..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py +++ /dev/null @@ -1,47 +0,0 @@ -import sys - - -def disable_importlib_metadata_finder(metadata): - """ - Ensure importlib_metadata doesn't provide older, incompatible - Distributions. - - Workaround for #3102. - """ - try: - import importlib_metadata - except ImportError: - return - except AttributeError: - import warnings - - msg = ( - "`importlib-metadata` version is incompatible with `setuptools`.\n" - "This problem is likely to be solved by installing an updated version of " - "`importlib-metadata`." - ) - warnings.warn(msg) # Ensure a descriptive message is shown. - raise # This exception can be suppressed by _distutils_hack - - if importlib_metadata is metadata: - return - to_remove = [ - ob - for ob in sys.meta_path - if isinstance(ob, importlib_metadata.MetadataPathFinder) - ] - for item in to_remove: - sys.meta_path.remove(item) - - -if sys.version_info < (3, 10): - from setuptools.extern import importlib_metadata as metadata - disable_importlib_metadata_finder(metadata) -else: - import importlib.metadata as metadata # noqa: F401 - - -if sys.version_info < (3, 9): - from setuptools.extern import importlib_resources as resources -else: - import importlib.resources as resources # noqa: F401 diff --git a/spaces/AzinZ/vitscn/commons.py b/spaces/AzinZ/vitscn/commons.py deleted file mode 100644 index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000 --- a/spaces/AzinZ/vitscn/commons.py +++ /dev/null @@ -1,161 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md b/spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md deleted file mode 100644 index 925e9c6c18cd10f7e3dc5fed88d87a888d16314f..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md +++ /dev/null @@ -1,124 +0,0 @@ -
-

Plague Inc 1.18 5 Mod Apk Việt Hóa: Cómo descargar y jugar el juego

-

Plague Inc es un popular juego de simulación que te permite crear y desarrollar un patógeno para acabar con la humanidad con una pandemia mortal. Pero ¿qué pasa si quieres jugar el juego con más características, más idiomas y más diversión? En este artículo, te mostraremos cómo descargar y jugar Plague Inc 1.18 5 Mod Apk Việt Hóa, una versión modificada del juego que ofrece muchas ventajas sobre la original.

-

¿Qué es Plague Inc?

-

Una breve introducción al juego y sus características

-

Plague Inc es un juego de simulación de estrategia en tiempo real desarrollado por Ndemic Creations. El juego fue inspirado por la película de 2011 Contagion y el juego de 2008 Flash Pandemic 2. El juego ha sido descargado más de 160 millones de veces a partir de mayo de 2021.

-

assoluto racing mod apk 1.9.1


Download File ===> https://bltlly.com/2v6JQZ



-

El juego te permite elegir entre diferentes modos de juego y patógenos, tales como bacterias, virus, hongos, parásitos, priones, nano-virus, armas biológicas, gusanos neuráxicos, virus necroa, gripe simia y plaga de sombras. Cada patógeno tiene sus propias características y estrategias para dominar.

-

Tu objetivo es infectar y matar a la población mundial con tu plaga, mientras te adaptas a diversos entornos y superas las defensas humanas. Puedes desarrollar tu plaga gastando puntos de ADN en transmisión, síntomas y habilidades. También puede desencadenar eventos aleatorios y eventos mundiales que pueden afectar la propagación y gravedad de su plaga.

- -

La diferencia entre la versión original y la versión modificada

-

Plague Inc 1.18 5 Mod Apk Việt Hóa es una versión modificada de Plague Inc que ofrece algunas ventajas sobre la versión original. Algunas de estas ventajas son:

-
    -
  • Desbloquea todo el contenido premium de forma gratuita, como genes, escenarios, trucos, plagas especiales.
  • -
  • Te da puntos de ADN ilimitados para evolucionar tu plaga más rápido.
  • -
  • Soporta el idioma vietnamita (việt hóa), así como el inglés y otros idiomas.
  • -
  • Tiene gráficos y efectos de sonido mejorados para una mejor experiencia de juego.
  • -
-

Cómo descargar e instalar Plague Inc 1.18 5 Mod Apk Việt Hóa

-

Los requisitos y pasos para descargar e instalar el mod apk

-

Para descargar e instalar Plague Inc 1.18 5 Mod Apk Việt Hóa, es necesario tener un dispositivo Android que cumple con los siguientes requisitos:

-
    -
  • Versión de Android 4.1 o superior.
  • -
  • Al menos 100 MB de espacio de almacenamiento libre.
  • -
  • Una conexión a Internet estable.
  • -
-

Una vez que haya comprobado la compatibilidad de su dispositivo, puede seguir estos pasos para descargar e instalar el apk mod:

-
    -
  1. Ir al enlace proporcionado a continuación para descargar el archivo apk mod.
  2. -
  3. Permita que su dispositivo instale aplicaciones desde fuentes desconocidas. Puede hacer esto yendo a Configuración > Seguridad > Fuentes desconocidas y habilitando la opción.
  4. -
  5. Busque el archivo apk mod descargado en el administrador de archivos de su dispositivo y toque en él para iniciar el proceso de instalación.
  6. -
  7. Siga las instrucciones en la pantalla para completar la instalación.
  8. -
  9. Iniciar el juego y disfrutar de jugar Plague Inc 1.18 5 Mod Apk Việt Hóa.
  10. -
-

El enlace para descargar el mod apk

-

Puede descargar Plague Inc 1.18 5 Mod Apk Việt Hóa desde este enlace: [Plague Inc 1.18 5 Mod Apk Việt Hóa]

-

Cómo Jugar Peste Inc 1.18 5 Mod Apk Việt Hóa

-

Los modos de juego y patógenos disponibles en el mod apk

- -
    -
  • juego principal: este es el modo estándar donde puede crear y evolucionar su propia plaga y tratar de infectar y matar al mundo.
  • -
  • Speed Run: Este es un modo temporizado donde tienes que infectar y matar al mundo lo más rápido posible.
  • -
  • Modo Co-Op: Este es un modo multijugador donde puedes formar equipo con otro jugador y trabajar juntos para infectar y matar al mundo.
  • -
  • Versus Mode: Este es un modo multijugador donde puedes competir con otro jugador y tratar de infectar y matar a más personas que ellos.
  • -
-

También puede elegir entre los siguientes patógenos:

-

- - -Patógeno -Descripción - - -Bacterias -El patógeno más común y bien redondeado. No tiene habilidades especiales pero puede evolucionar rápidamente. - - -Virus -Un patógeno que muta rápidamente que puede volverse difícil de curar. Tiene una alta probabilidad de desarrollar síntomas aleatorios, pero también puede volverse letal demasiado rápido. - - -Hongo -Un patógeno de propagación lenta que depende de las esporas para infectar nuevos países. Tiene una baja probabilidad de ser detectado, pero también puede luchar en climas cálidos. - - -Parásito -Un patógeno furtivo que puede evitar ser notado por los seres humanos. Tiene una gravedad baja, pero también puede reducir los puntos de ADN de los peligros biológicos rojos. - - -Prion -Un patógeno complejo que puede manipular el comportamiento de los humanos. Tiene una tasa de infección lenta, pero también puede desencadenar atrofia neuronal que hace que sea más difícil de curar. - - -Nano-Virus -Un patógeno sintético que se detecta desde el inicio del juego. Tiene una alta infectividad, pero también puede activar interruptores de eliminación que hacen que sea más fácil de curar. - - -Arma biológica -Un patógeno letal que puede matar a los humanos rápidamente. Tiene una gravedad alta pero también puede ser inestable y difícil de controlar. - - -Gusano de Neurax - - - -Virus de necrosis -Un virus creador de zombis que puede reanimar humanos muertos. Tiene un árbol de síntomas único y también puede desencadenar una respuesta militar global. - - -Gripe simia -Un virus genéticamente modificado que puede infectar tanto a humanos como a simios. Tiene un árbol de habilidades único y también puede desencadenar un levantamiento simio. - - -Shadow PlagueUn patógeno vampírico que puede crear vampiros e infectar humanos. Tiene un sistema único de sed de sangre y también puede desencadenar una respuesta templaria. - - -

Los consejos y trucos de juego para crear y propagar una plaga mortal

-

Plague Inc 1.18 5 Mod Apk Việt Hóa es un juego desafiante que requiere que pienses de forma estratégica y creativa para lograr tu objetivo de acabar con la humanidad. Estos son algunos consejos y trucos generales que pueden ayudarte a mejorar tu juego:

-
    -
  • Elija su patógeno y el modo de juego sabiamente. Los diferentes patógenos y modos de juego tienen diferentes fortalezas y debilidades, por lo que debes elegir el que se adapte a tu estilo de juego y estrategia.
  • -Comienza tu plaga en un país populoso y pobre. Esto le dará más puntos de ADN y más oportunidades para propagar su plaga a otros países. -
  • Equilibra tu transmisión, síntomas y habilidades. Necesitas desarrollar tu plaga de una manera que la haga más infecciosa, más severa y más resistente a diferentes factores, como el clima, la cura y la respuesta humana.
  • -
  • Cuidado con las noticias y los eventos mundiales. Estos pueden darle pistas sobre lo que está sucediendo en el mundo y cómo los seres humanos están reaccionando a su plaga. Puede utilizar esta información para ajustar su estrategia en consecuencia.
  • - -
-

Conclusión

-

Un resumen de los puntos principales y una recomendación para el juego

-

Plague Inc 1.18 5 Mod Apk Việt Hóa es un juego divertido y atractivo que le permite dar rienda suelta a su genio del mal interior y crear una pandemia global. El juego te ofrece muchas características, opciones y desafíos que lo hacen más agradable y realista que la versión original. Puede descargar e instalar el apk mod fácilmente desde el enlace proporcionado anteriormente. Si usted está buscando un juego que pone a prueba su creatividad, inteligencia y habilidades de estrategia, entonces Plague Inc 1.18 5 Mod Apk Việt Hóa es el juego para usted.

-

Preguntas frecuentes

-

Cinco preguntas y respuestas únicas sobre el juego y el apk mod

-
    -
  1. Q: ¿Es Plague Inc 1.18 5 Mod Apk Việt Hóa seguro para descargar y jugar?
  2. -
  3. A: Sí, Plague Inc 1.18 5 Mod Apk Việt Hóa es seguro para descargar y jugar. El archivo apk mod ha sido escaneado en busca de virus y malware y no tiene efectos dañinos en su dispositivo o datos.
  4. -
  5. Q: ¿Cuáles son los beneficios de jugar Plague Inc 1.18 5 Mod Apk Việt Hóa sobre la versión original?
  6. -
  7. A: Plague Inc 1.18 5 Mod Apk Việt Hóa le ofrece muchos beneficios sobre la versión original, como desbloquear todo el contenido premium de forma gratuita, dándole puntos de ADN ilimitados, apoyando el lenguaje vietnamita y mejorando los gráficos y efectos de sonido.
  8. -
  9. Q: ¿Cómo puedo actualizar Plague Inc 1.18 5 Mod Apk Việt Hóa a la última versión?
  10. -
  11. A: Para actualizar Plague Inc 1.18 5 Mod Apk Việt Hóa a la última versión, es necesario desinstalar la versión actual de su dispositivo y descargar la nueva versión desde el mismo enlace proporcionado anteriormente. Luego, debe instalar la nueva versión siguiendo los mismos pasos que antes.
  12. -
  13. Q: ¿Cómo puedo contactar al desarrollador de Plague Inc 1.18 5 Mod Apk Việt Hóa si tengo alguna pregunta o comentario?
  14. - -
  15. Q: ¿Cómo puedo apoyar al desarrollador de Plague Inc 1.18 5 Mod Apk Việt Hóa si me gusta su trabajo?
  16. -
  17. A: Usted puede apoyar al desarrollador de Plague Inc 1.18 5 Mod Apk Việt Hóa compartiendo su trabajo con sus amigos y familiares, dándoles comentarios positivos y calificaciones, o donando a ellos si tienen una opción de donación.
  18. -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md b/spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md deleted file mode 100644 index 3605ebfe9ae55723254e378b45baed1ceeb86ad7..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md +++ /dev/null @@ -1,26 +0,0 @@ -
-

Chicken Gun APK Latest Version: A Fun and Crazy Online Shooting Game

- If you are looking for a fun and crazy online shooting game, you should try the latest version of Chicken Gun APK. This is a game in which you play as armed chickens that shoot and fight each other. You can choose between two modes: 5 vs 5 teams or free-for-all. You can also customize your rooster, weapon, beak, sneakers, and caps. Throw explosive eggs and cause mayhem. Join the chicken shootout and have fun!

What is Chicken Gun APK?

- Chicken Gun APK is an Android game developed by ChaloApps. It combines action, humor, and multiplayer features. Here are some of the things you can do in this game:

A game where you play as armed chickens

- In Chicken Gun APK you are not a human soldier but a chicken warrior. You can choose between different breeds of chicken, such as white, black, brown, or red. Each chicken has its own stats and abilities. You can also equip your chicken with various weapons, such as pistols, shotguns, rifles, or grenades.

A game with two modes: 5 vs 5 and free-for-all

- Chicken Gun APK offers two game modes: 5 vs 5 teams or free-for-all. In team mode, you join a squad of five chickens and compete against another squad of five. The team with the most kills wins. In free-for-all mode, you play against nine other chickens in a chaotic battle royale. The last chicken standing wins.

A game where you can customize your rooster, weapon, beak, sneakers, and caps

- Chicken Gun APK lets you customize your rooster in many ways. You can change its weapon, beak, sneakers, and caps. You can also unlock new items by playing the game or buying them with coins. You can make your rooster look cool, funny, or scary.

How to download and install Chicken Gun APK

- - You can download the APK file from a trusted source, such as [APKCombo]( 1 ), [APKLeon]( 3 ) or [APKBloch]( 2 ). These are websites that offer free and safe downloads of Android games and apps. You can search for Chicken Gun APK on these sites and download the latest version.

Enable unknown sources on your device

- Before you can install the APK file on your device, you need to enable unknown sources. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown sources and turn it on.

Install the APK file and enjoy the game

- Once you have downloaded the APK file and enabled unknown sources, you can install it on your device. To do so, locate the file in your downloads folder and tap on it. Follow the on-screen instructions to complete the installation. Once the installation is finished, you can open the app and start playing the game.

What are the features of Chicken Gun APK?

- Chicken Gun APK is a game that offers many features that make it fun and exciting. Here are some of this game's features:

High-quality graphics and sound effects

- Chicken Gun APK has high-quality graphics and sound effects that create a realistic, immersive experience. The game has 3D models of chickens, weapons, and environments, along with realistic physics and animations. The sound effects are loud and clear, and you can hear the gunshots, explosions, and chicken noises.

Various weapons and items to use

- Chicken Gun APK has various weapons and items that you can use to shoot and fight other chickens. You can choose between pistols, shotguns, rifles, grenades, rocket launchers, flamethrowers, and more. You can also use explosive eggs, health kits, armor, and other items to help you in battle.

Different maps and settings to explore

- - Chicken Gun APK has an online multiplayer mode that lets you play with other players from around the world. You can join a public or private room, or create your own. You can also text chat and voice chat with other players using the built-in features. You can make friends or enemies, cooperate or compete, and have a lot of fun.

What are the tips and tricks for playing Chicken Gun APK?

- Chicken Gun APK is a game that requires skill, strategy, and luck. Here are some tips and tricks that can help you improve your game:

Aim for the head to do more damage

- One of the most important skills in Chicken Gun APK is aiming. You need to aim at your enemies' heads to do more damage and take them down faster. You can use the crosshair or the scope to aim better. You can also adjust your control sensitivity to suit your preferences.

Use explosive eggs to cause chaos

- One of the most fun and effective items in Chicken Gun APK is the explosive egg. You can throw these eggs at your enemies or their surroundings to cause explosions and damage. You can use them to distract, confuse, or eliminate your enemies, and also to destroy walls, doors, or vehicles.

Hide behind cover and keep moving to avoid getting shot

- One of the most important strategies in Chicken Gun APK is hiding behind cover and moving around to avoid getting shot. You need to find a good spot where you can stay out of your enemies' line of sight and shoot at them safely. You also need to move frequently so you are not an easy target. You can use the crouch, jump, or sprint buttons to help you move faster or more stealthily.

Team up with your friends and communicate with them

- - The latest version of Chicken Gun APK is a fun and crazy online shooting game that you should try if you like action, humor, and multiplayer games. You can play as armed chickens that shoot and fight each other across different modes, maps, and settings. You can also customize your rooster, weapon, beak, sneakers, and caps. Download and install Chicken Gun APK now and join the chicken shootout!

Frequently asked questions

- Here are some frequently asked questions about Chicken Gun APK: - Q: Is Chicken Gun APK free? - A: Yes, Chicken Gun APK is free to download and play. However, it contains ads and in-app purchases that you can disable or buy if you wish. - Q: Is Chicken Gun APK safe? - A: Yes, Chicken Gun APK is safe to download and install if you get it from a trusted source, such as [APKCombo], [APKLeon] or [APKBloch]. These are websites that offer free and safe downloads of Android games and apps. You can also scan the APK file with an antivirus app before installing it to make sure it is safe. - Q: How can I update Chicken Gun APK? - A: You can update Chicken Gun APK by downloading and installing the latest version of the APK file from the same source you got it from. You can also check for updates inside the game by going to Settings > About > Check for updates. - Q: How can I play Chicken Gun APK on PC? - A: You can play Chicken Gun APK on PC by using an Android emulator, such as [BlueStacks], [NoxPlayer] or [LDPlayer]. These are programs that let you run Android apps and games on your PC. You can download and install an emulator on your PC, then download and install Chicken Gun APK inside the emulator, and then play the game as you would on your device. - Q: How can I contact the developer of Chicken Gun APK? - A: You can contact the developer of Chicken Gun APK by sending an email to chaloapps@gmail.com. You can also follow them on their [Facebook page] or their [YouTube channel] for more updates and news about the game.

-

chicken gun apk latest version


DOWNLOAD ✏ ✏ ✏ https://bltlly.com/2v6JFN



64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py deleted file mode 100644 index adffd12db8c8e0477ee6532cd3b84f2e0cde9632..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py +++ /dev/null @@ -1,176 +0,0 @@ -import sys -import marshal -import contextlib -import dis - -from setuptools.extern.packaging import version - -from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE -from . import _imp - - -__all__ = [ - 'Require', 'find_module', 'get_module_constant', 'extract_constant' -] - - -class Require: - """A prerequisite to building or installing a distribution""" - - def __init__( - self, name, requested_version, module, homepage='', - attribute=None, format=None): - - if format is None and requested_version is not None: - format = version.Version - - if format is not None: - requested_version = format(requested_version) - if attribute is None: - attribute = '__version__' - - self.__dict__.update(locals()) - del self.self - - def full_name(self): - """Return full package/distribution name, w/version""" - if self.requested_version is not None: - return '%s-%s' % (self.name, self.requested_version) - return self.name - - def version_ok(self, version): - """Is 'version' sufficiently up-to-date?""" - return self.attribute is None or self.format is None or \ - str(version) != "unknown" and self.format(version) >= self.requested_version - - def get_version(self, paths=None, default="unknown"): - """Get version number of installed module, 'None', or 'default' - - Search 'paths' for module. If not found, return 'None'. If found, - return the extracted version attribute, or 'default' if no version - attribute was specified, or the value cannot be determined without - importing the module. The version is formatted according to the - requirement's version format (if any), unless it is 'None' or the - supplied 'default'. - """ - - if self.attribute is None: - try: - f, p, i = find_module(self.module, paths) - if f: - f.close() - return default - except ImportError: - return None - - v = get_module_constant(self.module, self.attribute, default, paths) - - if v is not None and v is not default and self.format is not None: - return self.format(v) - - return v - - def is_present(self, paths=None): - """Return true if dependency is present on 'paths'""" - return self.get_version(paths) is not None - - def is_current(self, paths=None): - """Return true if dependency is present and up-to-date on 'paths'""" - version = self.get_version(paths) - if version is None: - return False - return self.version_ok(str(version)) - - -def maybe_close(f): - @contextlib.contextmanager - def empty(): - yield - return - if not f: - return empty() - - return contextlib.closing(f) - - -def get_module_constant(module, symbol, default=-1, paths=None): - """Find 'module' by searching 'paths', and extract 'symbol' - - Return 'None' if 'module' does not exist on 'paths', or it does not define - 'symbol'. If the module defines 'symbol' as a constant, return the - constant. 
Otherwise, return 'default'.""" - - try: - f, path, (suffix, mode, kind) = info = find_module(module, paths) - except ImportError: - # Module doesn't exist - return None - - with maybe_close(f): - if kind == PY_COMPILED: - f.read(8) # skip magic & date - code = marshal.load(f) - elif kind == PY_FROZEN: - code = _imp.get_frozen_object(module, paths) - elif kind == PY_SOURCE: - code = compile(f.read(), path, 'exec') - else: - # Not something we can parse; we'll have to import it. :( - imported = _imp.get_module(module, paths, info) - return getattr(imported, symbol, None) - - return extract_constant(code, symbol, default) - - -def extract_constant(code, symbol, default=-1): - """Extract the constant value of 'symbol' from 'code' - - If the name 'symbol' is bound to a constant value by the Python code - object 'code', return that value. If 'symbol' is bound to an expression, - return 'default'. Otherwise, return 'None'. - - Return value is based on the first assignment to 'symbol'. 'symbol' must - be a global, or at least a non-"fast" local in the code block. That is, - only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' - must be present in 'code.co_names'. - """ - if symbol not in code.co_names: - # name's not there, can't possibly be an assignment - return None - - name_idx = list(code.co_names).index(symbol) - - STORE_NAME = 90 - STORE_GLOBAL = 97 - LOAD_CONST = 100 - - const = default - - for byte_code in dis.Bytecode(code): - op = byte_code.opcode - arg = byte_code.arg - - if op == LOAD_CONST: - const = code.co_consts[arg] - elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): - return const - else: - const = default - - -def _update_globals(): - """ - Patch the globals to remove the objects not available on some platforms. - - XXX it'd be better to test assertions about bytecode instead. - """ - - if not sys.platform.startswith('java') and sys.platform != 'cli': - return - incompatible = 'extract_constant', 'get_module_constant' - for name in incompatible: - del globals()[name] - __all__.remove(name) - - -_update_globals() diff --git a/spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py b/spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py deleted file mode 100644 index 8e17db52c85aa693fe8a2f6d0036afc432580cfc..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py +++ /dev/null @@ -1,46 +0,0 @@ -def accuracy(pred, target, topk=1, thresh=None): - """Calculate accuracy according to the prediction and target. - - Args: - pred (torch.Tensor): The model prediction, shape (N, num_class, ...) - target (torch.Tensor): The target of each prediction, shape (N, , ...) - topk (int | tuple[int], optional): If the predictions in ``topk`` - matches the target, the predictions will be regarded as - correct ones. Defaults to 1. - thresh (float, optional): If not None, predictions with scores under - this threshold are considered incorrect. Default to None. - - Returns: - float | tuple[float]: If the input ``topk`` is a single integer, - the function will return a single float as accuracy. If - ``topk`` is a tuple containing multiple integers, the - function will return a tuple containing accuracies of - each ``topk`` number. - """ - assert isinstance(topk, (int, tuple)) - if isinstance(topk, int): - topk = (topk, ) - return_single = True - else: - return_single = False - - maxk = max(topk) - if pred.size(0) == 0: - accu = [pred.new_tensor(0.) 
for i in range(len(topk))] - return accu[0] if return_single else accu - assert pred.ndim == target.ndim + 1 - assert pred.size(0) == target.size(0) - assert maxk <= pred.size(1), \ - f'maxk {maxk} exceeds pred dimension {pred.size(1)}' - pred_value, pred_label = pred.topk(maxk, dim=1) - # transpose to shape (maxk, N, ...) - pred_label = pred_label.transpose(0, 1) - correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label)) - if thresh is not None: - # Only prediction values larger than thresh are counted as correct - correct = correct & (pred_value > thresh).t() - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / target.numel())) - return res[0] if return_single else res diff --git a/spaces/CVPR/TokenCut/README.md b/spaces/CVPR/TokenCut/README.md deleted file mode 100644 index 7714fab18c88bf10b0ee5cd04b4bca3ec5ea7e55..0000000000000000000000000000000000000000 --- a/spaces/CVPR/TokenCut/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TokenCut -emoji: 😎 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.0.15 -app_file: app.py -pinned: false ---- - -This Demo is the TokenCut demo, the original demo is from https://huggingface.co/spaces/akhaliq/TokenCut. Thanks for Ahsen Khaliq's nicely contribution. diff --git a/spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py b/spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py deleted file mode 100644 index 00a8e1a98d1147690ab6e21060a450f700cecb49..0000000000000000000000000000000000000000 --- a/spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2022 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy as np -import torch -from typing import Optional, Union, Tuple, Dict - - -def register_attention_control(model, controller): - def ca_forward(self, place_in_unet): - - def forward(x, context=None, mask=None): - batch_size, sequence_length, dim = x.shape - h = self.heads - q = self.to_q(x) - is_cross = context is not None - context = context if is_cross else x - k = self.to_k(context) - v = self.to_v(context) - q = self.reshape_heads_to_batch_dim(q) - k = self.reshape_heads_to_batch_dim(k) - v = self.reshape_heads_to_batch_dim(v) - - sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale - - if mask is not None: - mask = mask.reshape(batch_size, -1) - max_neg_value = -torch.finfo(sim.dtype).max - mask = mask[:, None, :].repeat(h, 1, 1) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - attn = controller(attn, is_cross, place_in_unet) - out = torch.einsum("b i j, b j d -> b i d", attn, v) - out = self.reshape_batch_dim_to_heads(out) - - # TODO: Chen (new version of diffusers) - # return self.to_out(out) - # linear proj - out = self.to_out[0](out) - # dropout - out = self.to_out[1](out) - return out - - return forward - - def register_recr(net_, count, place_in_unet): - if net_.__class__.__name__ == 'CrossAttention': - net_.forward = ca_forward(net_, place_in_unet) - return count + 1 - elif hasattr(net_, 'children'): - for net__ in net_.children(): - count = register_recr(net__, count, place_in_unet) - return count - - cross_att_count = 0 - sub_nets = model.unet.named_children() - for net in sub_nets: - if "down" in net[0]: - cross_att_count += register_recr(net[1], 0, "down") - elif "up" in net[0]: - cross_att_count += register_recr(net[1], 0, "up") - elif "mid" in net[0]: - cross_att_count += register_recr(net[1], 0, "mid") - controller.num_att_layers = cross_att_count - - -def get_word_inds(text: str, word_place: int, tokenizer): - split_text = text.split(" ") - if type(word_place) is str: - word_place = [i for i, word in enumerate(split_text) if word_place == word] - elif type(word_place) is int: - word_place = [word_place] - out = [] - if len(word_place) > 0: - words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1] - cur_len, ptr = 0, 0 - - for i in range(len(words_encode)): - cur_len += len(words_encode[i]) - if ptr in word_place: - out.append(i + 1) - if cur_len >= len(split_text[ptr]): - ptr += 1 - cur_len = 0 - return np.array(out) - - -def update_alpha_time_word(alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int, word_inds: Optional[torch.Tensor]=None): - if type(bounds) is float: - bounds = 0, bounds - start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0]) - if word_inds is None: - word_inds = torch.arange(alpha.shape[2]) - alpha[: start, prompt_ind, word_inds] = 0 - alpha[start: end, prompt_ind, word_inds] = 1 - alpha[end:, prompt_ind, word_inds] = 0 - return alpha - - -def get_time_words_attention_alpha(prompts, num_steps, cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]], - tokenizer, max_num_words=77): - if type(cross_replace_steps) is not dict: - cross_replace_steps = {"default_": cross_replace_steps} - if "default_" not in cross_replace_steps: - cross_replace_steps["default_"] = (0., 1.) 
- alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words) - for i in range(len(prompts) - 1): - alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], - i) - for key, item in cross_replace_steps.items(): - if key != "default_": - inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))] - for i, ind in enumerate(inds): - if len(ind) > 0: - alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind) - alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words) # time, batch, heads, pixels, words - return alpha_time_words diff --git a/spaces/Cvandi/remake/app.py b/spaces/Cvandi/remake/app.py deleted file mode 100644 index 97c59221c429e335c3a2e3413c11cc155d5b6122..0000000000000000000000000000000000000000 --- a/spaces/Cvandi/remake/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -os.system("pip install gradio==2.9b23") -import random -import gradio as gr -from PIL import Image -import torch -from random import randint -import sys -from subprocess import call -import psutil - - - - -torch.hub.download_url_to_file('http://people.csail.mit.edu/billf/project%20pages/sresCode/Markov%20Random%20Fields%20for%20Super-Resolution_files/100075_lowres.jpg', 'bear.jpg') - - -def run_cmd(command): - try: - print(command) - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) -run_cmd("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P .") -run_cmd("pip install basicsr") -run_cmd("pip freeze") - -os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P .") - - -def inference(img,mode): - _id = randint(1, 10000) - INPUT_DIR = "/tmp/input_image" + str(_id) + "/" - OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/" - run_cmd("rm -rf " + INPUT_DIR) - run_cmd("rm -rf " + OUTPUT_DIR) - run_cmd("mkdir " + INPUT_DIR) - run_cmd("mkdir " + OUTPUT_DIR) - basewidth = 256 - wpercent = (basewidth/float(img.size[0])) - hsize = int((float(img.size[1])*float(wpercent))) - img = img.resize((basewidth,hsize), Image.ANTIALIAS) - img.save(INPUT_DIR + "1.jpg", "JPEG") - if mode == "base": - run_cmd("python inference_realesrgan.py -n RealESRGAN_x4plus -i "+ INPUT_DIR + " -o " + OUTPUT_DIR) - else: - os.system("python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i "+ INPUT_DIR + " -o " + OUTPUT_DIR) - return os.path.join(OUTPUT_DIR, "1_out.jpg") - - - - -title = "Real-ESRGAN" -description = "Gradio demo for Real-ESRGAN. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "

Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data | Github Repo

" - -gr.Interface( - inference, - [gr.inputs.Image(type="pil", label="Input"),gr.inputs.Radio(["base","anime"], type="value", default="base", label="model type")], - gr.outputs.Image(type="file", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['bear.jpg','base'], - ['anime.png','anime'] - ]).launch() \ No newline at end of file diff --git a/spaces/DJQmUKV/rvc-inference/infer_pack/models_onnx_moess.py b/spaces/DJQmUKV/rvc-inference/infer_pack/models_onnx_moess.py deleted file mode 100644 index 12efb0629a2e3d0d746a34f467254536c2bdbe5f..0000000000000000000000000000000000000000 --- a/spaces/DJQmUKV/rvc-inference/infer_pack/models_onnx_moess.py +++ /dev/null @@ -1,849 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) 
# [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, 
padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" 
- ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - 
self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, 
- kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 
1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageGrab.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageGrab.py deleted file mode 100644 index 927033c6073a28ae67c0e33ec53ec660c741b194..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageGrab.py +++ /dev/null @@ -1,169 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# screen grabber -# -# History: -# 2001-04-26 fl created -# 2001-09-17 fl use builtin driver, if present -# 2002-11-19 fl added grabclipboard support -# -# Copyright (c) 2001-2002 by Secret Labs AB -# Copyright (c) 2001-2002 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import io -import os -import shutil -import subprocess -import sys -import tempfile - -from . 
import Image - - -def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None): - if xdisplay is None: - if sys.platform == "darwin": - fh, filepath = tempfile.mkstemp(".png") - os.close(fh) - args = ["screencapture"] - if bbox: - left, top, right, bottom = bbox - args += ["-R", f"{left},{top},{right-left},{bottom-top}"] - subprocess.call(args + ["-x", filepath]) - im = Image.open(filepath) - im.load() - os.unlink(filepath) - if bbox: - im_resized = im.resize((right - left, bottom - top)) - im.close() - return im_resized - return im - elif sys.platform == "win32": - offset, size, data = Image.core.grabscreen_win32( - include_layered_windows, all_screens - ) - im = Image.frombytes( - "RGB", - size, - data, - # RGB, 32-bit line padding, origin lower left corner - "raw", - "BGR", - (size[0] * 3 + 3) & -4, - -1, - ) - if bbox: - x0, y0 = offset - left, top, right, bottom = bbox - im = im.crop((left - x0, top - y0, right - x0, bottom - y0)) - return im - try: - if not Image.core.HAVE_XCB: - msg = "Pillow was built without XCB support" - raise OSError(msg) - size, data = Image.core.grabscreen_x11(xdisplay) - except OSError: - if ( - xdisplay is None - and sys.platform not in ("darwin", "win32") - and shutil.which("gnome-screenshot") - ): - fh, filepath = tempfile.mkstemp(".png") - os.close(fh) - subprocess.call(["gnome-screenshot", "-f", filepath]) - im = Image.open(filepath) - im.load() - os.unlink(filepath) - if bbox: - im_cropped = im.crop(bbox) - im.close() - return im_cropped - return im - else: - raise - else: - im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1) - if bbox: - im = im.crop(bbox) - return im - - -def grabclipboard(): - if sys.platform == "darwin": - fh, filepath = tempfile.mkstemp(".png") - os.close(fh) - commands = [ - 'set theFile to (open for access POSIX file "' - + filepath - + '" with write permission)', - "try", - " write (the clipboard as «class PNGf») to theFile", - "end try", - "close access theFile", - ] - script = ["osascript"] - for command in commands: - script += ["-e", command] - subprocess.call(script) - - im = None - if os.stat(filepath).st_size != 0: - im = Image.open(filepath) - im.load() - os.unlink(filepath) - return im - elif sys.platform == "win32": - fmt, data = Image.core.grabclipboard_win32() - if fmt == "file": # CF_HDROP - import struct - - o = struct.unpack_from("I", data)[0] - if data[16] != 0: - files = data[o:].decode("utf-16le").split("\0") - else: - files = data[o:].decode("mbcs").split("\0") - return files[: files.index("")] - if isinstance(data, bytes): - data = io.BytesIO(data) - if fmt == "png": - from . import PngImagePlugin - - return PngImagePlugin.PngImageFile(data) - elif fmt == "DIB": - from . 
import BmpImagePlugin - - return BmpImagePlugin.DibImageFile(data) - return None - else: - if shutil.which("wl-paste"): - output = subprocess.check_output(["wl-paste", "-l"]).decode() - mimetypes = output.splitlines() - if "image/png" in mimetypes: - mimetype = "image/png" - elif mimetypes: - mimetype = mimetypes[0] - else: - mimetype = None - - args = ["wl-paste"] - if mimetype: - args.extend(["-t", mimetype]) - elif shutil.which("xclip"): - args = ["xclip", "-selection", "clipboard", "-t", "image/png", "-o"] - else: - msg = "wl-paste or xclip is required for ImageGrab.grabclipboard() on Linux" - raise NotImplementedError(msg) - p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - err = p.stderr - if err: - msg = f"{args[0]} error: {err.strip().decode()}" - raise ChildProcessError(msg) - data = io.BytesIO(p.stdout) - im = Image.open(data) - im.load() - return im diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_fileresponse.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_fileresponse.py deleted file mode 100644 index f41ed3fd0a9c1e0d5e45ce1e97b99bfef8361cac..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_fileresponse.py +++ /dev/null @@ -1,288 +0,0 @@ -import asyncio -import mimetypes -import os -import pathlib -import sys -from typing import ( # noqa - IO, - TYPE_CHECKING, - Any, - Awaitable, - Callable, - Iterator, - List, - Optional, - Tuple, - Union, - cast, -) - -from . import hdrs -from .abc import AbstractStreamWriter -from .helpers import ETAG_ANY, ETag -from .typedefs import Final, LooseHeaders -from .web_exceptions import ( - HTTPNotModified, - HTTPPartialContent, - HTTPPreconditionFailed, - HTTPRequestRangeNotSatisfiable, -) -from .web_response import StreamResponse - -__all__ = ("FileResponse",) - -if TYPE_CHECKING: # pragma: no cover - from .web_request import BaseRequest - - -_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]] - - -NOSENDFILE: Final[bool] = bool(os.environ.get("AIOHTTP_NOSENDFILE")) - - -class FileResponse(StreamResponse): - """A response object can be used to send files.""" - - def __init__( - self, - path: Union[str, pathlib.Path], - chunk_size: int = 256 * 1024, - status: int = 200, - reason: Optional[str] = None, - headers: Optional[LooseHeaders] = None, - ) -> None: - super().__init__(status=status, reason=reason, headers=headers) - - if isinstance(path, str): - path = pathlib.Path(path) - - self._path = path - self._chunk_size = chunk_size - - async def _sendfile_fallback( - self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int - ) -> AbstractStreamWriter: - # To keep memory usage low,fobj is transferred in chunks - # controlled by the constructor's chunk_size argument. 
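# The loop below streams `fobj` to the writer in `chunk_size` pieces. A minimal
# synchronous sketch of the same idea (hypothetical standalone helper, not part
# of aiohttp): seek to `offset`, then hand at most `count` bytes to `sink`,
# decrementing by the number of bytes actually read.
def copy_file_range(src, sink, offset: int, count: int, chunk_size: int = 256 * 1024) -> None:
    src.seek(offset)
    remaining = count
    while remaining > 0:
        chunk = src.read(min(chunk_size, remaining))
        if not chunk:  # EOF before `count` bytes were produced
            break
        sink(chunk)
        remaining -= len(chunk)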
- - chunk_size = self._chunk_size - loop = asyncio.get_event_loop() - - await loop.run_in_executor(None, fobj.seek, offset) - - chunk = await loop.run_in_executor(None, fobj.read, chunk_size) - while chunk: - await writer.write(chunk) - count = count - chunk_size - if count <= 0: - break - chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count)) - - await writer.drain() - return writer - - async def _sendfile( - self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int - ) -> AbstractStreamWriter: - writer = await super().prepare(request) - assert writer is not None - - if NOSENDFILE or sys.version_info < (3, 7) or self.compression: - return await self._sendfile_fallback(writer, fobj, offset, count) - - loop = request._loop - transport = request.transport - assert transport is not None - - try: - await loop.sendfile(transport, fobj, offset, count) - except NotImplementedError: - return await self._sendfile_fallback(writer, fobj, offset, count) - - await super().write_eof() - return writer - - @staticmethod - def _strong_etag_match(etag_value: str, etags: Tuple[ETag, ...]) -> bool: - if len(etags) == 1 and etags[0].value == ETAG_ANY: - return True - return any(etag.value == etag_value for etag in etags if not etag.is_weak) - - async def _not_modified( - self, request: "BaseRequest", etag_value: str, last_modified: float - ) -> Optional[AbstractStreamWriter]: - self.set_status(HTTPNotModified.status_code) - self._length_check = False - self.etag = etag_value # type: ignore[assignment] - self.last_modified = last_modified # type: ignore[assignment] - # Delete any Content-Length headers provided by user. HTTP 304 - # should always have empty response body - return await super().prepare(request) - - async def _precondition_failed( - self, request: "BaseRequest" - ) -> Optional[AbstractStreamWriter]: - self.set_status(HTTPPreconditionFailed.status_code) - self.content_length = 0 - return await super().prepare(request) - - async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]: - filepath = self._path - - gzip = False - if "gzip" in request.headers.get(hdrs.ACCEPT_ENCODING, ""): - gzip_path = filepath.with_name(filepath.name + ".gz") - - if gzip_path.is_file(): - filepath = gzip_path - gzip = True - - loop = asyncio.get_event_loop() - st: os.stat_result = await loop.run_in_executor(None, filepath.stat) - - etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}" - last_modified = st.st_mtime - - # https://tools.ietf.org/html/rfc7232#section-6 - ifmatch = request.if_match - if ifmatch is not None and not self._strong_etag_match(etag_value, ifmatch): - return await self._precondition_failed(request) - - unmodsince = request.if_unmodified_since - if ( - unmodsince is not None - and ifmatch is None - and st.st_mtime > unmodsince.timestamp() - ): - return await self._precondition_failed(request) - - ifnonematch = request.if_none_match - if ifnonematch is not None and self._strong_etag_match(etag_value, ifnonematch): - return await self._not_modified(request, etag_value, last_modified) - - modsince = request.if_modified_since - if ( - modsince is not None - and ifnonematch is None - and st.st_mtime <= modsince.timestamp() - ): - return await self._not_modified(request, etag_value, last_modified) - - if hdrs.CONTENT_TYPE not in self.headers: - ct, encoding = mimetypes.guess_type(str(filepath)) - if not ct: - ct = "application/octet-stream" - should_set_ct = True - else: - encoding = "gzip" if gzip else None - should_set_ct = False - - status = 
self._status - file_size = st.st_size - count = file_size - - start = None - - ifrange = request.if_range - if ifrange is None or st.st_mtime <= ifrange.timestamp(): - # If-Range header check: - # condition = cached date >= last modification date - # return 206 if True else 200. - # if False: - # Range header would not be processed, return 200 - # if True but Range header missing - # return 200 - try: - rng = request.http_range - start = rng.start - end = rng.stop - except ValueError: - # https://tools.ietf.org/html/rfc7233: - # A server generating a 416 (Range Not Satisfiable) response to - # a byte-range request SHOULD send a Content-Range header field - # with an unsatisfied-range value. - # The complete-length in a 416 response indicates the current - # length of the selected representation. - # - # Will do the same below. Many servers ignore this and do not - # send a Content-Range header with HTTP 416 - self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}" - self.set_status(HTTPRequestRangeNotSatisfiable.status_code) - return await super().prepare(request) - - # If a range request has been made, convert start, end slice - # notation into file pointer offset and count - if start is not None or end is not None: - if start < 0 and end is None: # return tail of file - start += file_size - if start < 0: - # if Range:bytes=-1000 in request header but file size - # is only 200, there would be trouble without this - start = 0 - count = file_size - start - else: - # rfc7233:If the last-byte-pos value is - # absent, or if the value is greater than or equal to - # the current length of the representation data, - # the byte range is interpreted as the remainder - # of the representation (i.e., the server replaces the - # value of last-byte-pos with a value that is one less than - # the current length of the selected representation). - count = ( - min(end if end is not None else file_size, file_size) - start - ) - - if start >= file_size: - # HTTP 416 should be returned in this case. - # - # According to https://tools.ietf.org/html/rfc7233: - # If a valid byte-range-set includes at least one - # byte-range-spec with a first-byte-pos that is less than - # the current length of the representation, or at least one - # suffix-byte-range-spec with a non-zero suffix-length, - # then the byte-range-set is satisfiable. Otherwise, the - # byte-range-set is unsatisfiable. - self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}" - self.set_status(HTTPRequestRangeNotSatisfiable.status_code) - return await super().prepare(request) - - status = HTTPPartialContent.status_code - # Even though you are sending the whole file, you should still - # return a HTTP 206 for a Range request. 
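# The arithmetic above turns an RFC 7233 byte range into a file offset and byte
# count, treating `end` as exclusive (as `request.http_range` does). A
# standalone sketch of that mapping, with a hypothetical helper name:
def range_to_offset_count(start, end, file_size: int):
    if start is not None and start < 0 and end is None:
        # suffix range such as "bytes=-500": the last |start| bytes of the file
        start = max(file_size + start, 0)
        return start, file_size - start
    if start is None:
        return 0, file_size
    return start, min(end if end is not None else file_size, file_size) - start

assert range_to_offset_count(None, None, 1000) == (0, 1000)   # no Range header
assert range_to_offset_count(-200, None, 1000) == (800, 200)  # "bytes=-200"
assert range_to_offset_count(100, 500, 1000) == (100, 400)    # "bytes=100-499"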
- self.set_status(status) - - if should_set_ct: - self.content_type = ct # type: ignore[assignment] - if encoding: - self.headers[hdrs.CONTENT_ENCODING] = encoding - if gzip: - self.headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING - - self.etag = etag_value # type: ignore[assignment] - self.last_modified = st.st_mtime # type: ignore[assignment] - self.content_length = count - - self.headers[hdrs.ACCEPT_RANGES] = "bytes" - - real_start = cast(int, start) - - if status == HTTPPartialContent.status_code: - self.headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format( - real_start, real_start + count - 1, file_size - ) - - # If we are sending 0 bytes calling sendfile() will throw a ValueError - if count == 0 or request.method == hdrs.METH_HEAD or self.status in [204, 304]: - return await super().prepare(request) - - fobj = await loop.run_in_executor(None, filepath.open, "rb") - if start: # be aware that start could be None or int=0 here. - offset = start - else: - offset = 0 - - try: - return await self._sendfile(request, fobj, offset, count) - finally: - await loop.run_in_executor(None, fobj.close) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_winconsole.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_winconsole.py deleted file mode 100644 index 6b20df315b23ecd1e3d0ec32c11c0b5ced577efe..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_winconsole.py +++ /dev/null @@ -1,279 +0,0 @@ -# This module is based on the excellent work by Adam Bartoš who -# provided a lot of what went into the implementation here in -# the discussion to issue1602 in the Python bug tracker. -# -# There are some general differences in regards to how this works -# compared to the original patches as we do not need to patch -# the entire interpreter but just work in our little world of -# echo and prompt. 
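# A self-contained sketch of the console-detection idea this module builds on
# (see `_is_console` further down): a file descriptor refers to a real Windows
# console exactly when GetConsoleMode succeeds for its OS handle. The helper
# name is hypothetical and the body is guarded so it only runs on Windows.
import sys

if sys.platform == "win32":
    import msvcrt
    from ctypes import byref, windll
    from ctypes.wintypes import DWORD

    def fd_is_console(fileno: int) -> bool:
        handle = msvcrt.get_osfhandle(fileno)
        return bool(windll.kernel32.GetConsoleMode(handle, byref(DWORD())))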
-import io -import sys -import time -import typing as t -from ctypes import byref -from ctypes import c_char -from ctypes import c_char_p -from ctypes import c_int -from ctypes import c_ssize_t -from ctypes import c_ulong -from ctypes import c_void_p -from ctypes import POINTER -from ctypes import py_object -from ctypes import Structure -from ctypes.wintypes import DWORD -from ctypes.wintypes import HANDLE -from ctypes.wintypes import LPCWSTR -from ctypes.wintypes import LPWSTR - -from ._compat import _NonClosingTextIOWrapper - -assert sys.platform == "win32" -import msvcrt # noqa: E402 -from ctypes import windll # noqa: E402 -from ctypes import WINFUNCTYPE # noqa: E402 - -c_ssize_p = POINTER(c_ssize_t) - -kernel32 = windll.kernel32 -GetStdHandle = kernel32.GetStdHandle -ReadConsoleW = kernel32.ReadConsoleW -WriteConsoleW = kernel32.WriteConsoleW -GetConsoleMode = kernel32.GetConsoleMode -GetLastError = kernel32.GetLastError -GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) -CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( - ("CommandLineToArgvW", windll.shell32) -) -LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32)) - -STDIN_HANDLE = GetStdHandle(-10) -STDOUT_HANDLE = GetStdHandle(-11) -STDERR_HANDLE = GetStdHandle(-12) - -PyBUF_SIMPLE = 0 -PyBUF_WRITABLE = 1 - -ERROR_SUCCESS = 0 -ERROR_NOT_ENOUGH_MEMORY = 8 -ERROR_OPERATION_ABORTED = 995 - -STDIN_FILENO = 0 -STDOUT_FILENO = 1 -STDERR_FILENO = 2 - -EOF = b"\x1a" -MAX_BYTES_WRITTEN = 32767 - -try: - from ctypes import pythonapi -except ImportError: - # On PyPy we cannot get buffers so our ability to operate here is - # severely limited. - get_buffer = None -else: - - class Py_buffer(Structure): - _fields_ = [ - ("buf", c_void_p), - ("obj", py_object), - ("len", c_ssize_t), - ("itemsize", c_ssize_t), - ("readonly", c_int), - ("ndim", c_int), - ("format", c_char_p), - ("shape", c_ssize_p), - ("strides", c_ssize_p), - ("suboffsets", c_ssize_p), - ("internal", c_void_p), - ] - - PyObject_GetBuffer = pythonapi.PyObject_GetBuffer - PyBuffer_Release = pythonapi.PyBuffer_Release - - def get_buffer(obj, writable=False): - buf = Py_buffer() - flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE - PyObject_GetBuffer(py_object(obj), byref(buf), flags) - - try: - buffer_type = c_char * buf.len - return buffer_type.from_address(buf.buf) - finally: - PyBuffer_Release(byref(buf)) - - -class _WindowsConsoleRawIOBase(io.RawIOBase): - def __init__(self, handle): - self.handle = handle - - def isatty(self): - super().isatty() - return True - - -class _WindowsConsoleReader(_WindowsConsoleRawIOBase): - def readable(self): - return True - - def readinto(self, b): - bytes_to_be_read = len(b) - if not bytes_to_be_read: - return 0 - elif bytes_to_be_read % 2: - raise ValueError( - "cannot read odd number of bytes from UTF-16-LE encoded console" - ) - - buffer = get_buffer(b, writable=True) - code_units_to_be_read = bytes_to_be_read // 2 - code_units_read = c_ulong() - - rv = ReadConsoleW( - HANDLE(self.handle), - buffer, - code_units_to_be_read, - byref(code_units_read), - None, - ) - if GetLastError() == ERROR_OPERATION_ABORTED: - # wait for KeyboardInterrupt - time.sleep(0.1) - if not rv: - raise OSError(f"Windows error: {GetLastError()}") - - if buffer[0] == EOF: - return 0 - return 2 * code_units_read.value - - -class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): - def writable(self): - return True - - @staticmethod - def _get_error_message(errno): - if errno == ERROR_SUCCESS: - 
return "ERROR_SUCCESS" - elif errno == ERROR_NOT_ENOUGH_MEMORY: - return "ERROR_NOT_ENOUGH_MEMORY" - return f"Windows error {errno}" - - def write(self, b): - bytes_to_be_written = len(b) - buf = get_buffer(b) - code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2 - code_units_written = c_ulong() - - WriteConsoleW( - HANDLE(self.handle), - buf, - code_units_to_be_written, - byref(code_units_written), - None, - ) - bytes_written = 2 * code_units_written.value - - if bytes_written == 0 and bytes_to_be_written > 0: - raise OSError(self._get_error_message(GetLastError())) - return bytes_written - - -class ConsoleStream: - def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None: - self._text_stream = text_stream - self.buffer = byte_stream - - @property - def name(self) -> str: - return self.buffer.name - - def write(self, x: t.AnyStr) -> int: - if isinstance(x, str): - return self._text_stream.write(x) - try: - self.flush() - except Exception: - pass - return self.buffer.write(x) - - def writelines(self, lines: t.Iterable[t.AnyStr]) -> None: - for line in lines: - self.write(line) - - def __getattr__(self, name: str) -> t.Any: - return getattr(self._text_stream, name) - - def isatty(self) -> bool: - return self.buffer.isatty() - - def __repr__(self): - return f"" - - -def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO: - text_stream = _NonClosingTextIOWrapper( - io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), - "utf-16-le", - "strict", - line_buffering=True, - ) - return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) - - -def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO: - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), - "utf-16-le", - "strict", - line_buffering=True, - ) - return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) - - -def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO: - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), - "utf-16-le", - "strict", - line_buffering=True, - ) - return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) - - -_stream_factories: t.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = { - 0: _get_text_stdin, - 1: _get_text_stdout, - 2: _get_text_stderr, -} - - -def _is_console(f: t.TextIO) -> bool: - if not hasattr(f, "fileno"): - return False - - try: - fileno = f.fileno() - except (OSError, io.UnsupportedOperation): - return False - - handle = msvcrt.get_osfhandle(fileno) - return bool(GetConsoleMode(handle, byref(DWORD()))) - - -def _get_windows_console_stream( - f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] -) -> t.Optional[t.TextIO]: - if ( - get_buffer is not None - and encoding in {"utf-16-le", None} - and errors in {"strict", None} - and _is_console(f) - ): - func = _stream_factories.get(f.fileno()) - if func is not None: - b = getattr(f, "buffer", None) - - if b is None: - return None - - return func(b) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_e_a_d.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_e_a_d.py deleted file mode 100644 index 04505e8250919eb666b8412e2d12cd739cc16bde..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_e_a_d.py +++ /dev/null @@ -1,124 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import 
floatToFixedToStr, strToFixedToFloat -from fontTools.misc.textTools import safeEval, num2binary, binary2num -from fontTools.misc.timeTools import ( - timestampFromString, - timestampToString, - timestampNow, -) -from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat -from fontTools.misc.arrayTools import intRect, unionRect -from . import DefaultTable -import logging - - -log = logging.getLogger(__name__) - -headFormat = """ - > # big endian - tableVersion: 16.16F - fontRevision: 16.16F - checkSumAdjustment: I - magicNumber: I - flags: H - unitsPerEm: H - created: Q - modified: Q - xMin: h - yMin: h - xMax: h - yMax: h - macStyle: H - lowestRecPPEM: H - fontDirectionHint: h - indexToLocFormat: h - glyphDataFormat: h -""" - - -class table__h_e_a_d(DefaultTable.DefaultTable): - - dependencies = ["maxp", "loca", "CFF ", "CFF2"] - - def decompile(self, data, ttFont): - dummy, rest = sstruct.unpack2(headFormat, data, self) - if rest: - # this is quite illegal, but there seem to be fonts out there that do this - log.warning("extra bytes at the end of 'head' table") - assert rest == b"\0\0" - - # For timestamp fields, ignore the top four bytes. Some fonts have - # bogus values there. Since till 2038 those bytes only can be zero, - # ignore them. - # - # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810 - for stamp in "created", "modified": - value = getattr(self, stamp) - if value > 0xFFFFFFFF: - log.warning("'%s' timestamp out of range; ignoring top bytes", stamp) - value &= 0xFFFFFFFF - setattr(self, stamp, value) - if value < 0x7C259DC0: # January 1, 1970 00:00:00 - log.warning( - "'%s' timestamp seems very low; regarding as unix timestamp", stamp - ) - value += 0x7C259DC0 - setattr(self, stamp, value) - - def compile(self, ttFont): - if ttFont.recalcBBoxes: - # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc(). 
- if "CFF " in ttFont: - topDict = ttFont["CFF "].cff.topDictIndex[0] - self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox) - elif "CFF2" in ttFont: - topDict = ttFont["CFF2"].cff.topDictIndex[0] - charStrings = topDict.CharStrings - fontBBox = None - for charString in charStrings.values(): - bounds = charString.calcBounds(charStrings) - if bounds is not None: - if fontBBox is not None: - fontBBox = unionRect(fontBBox, bounds) - else: - fontBBox = bounds - if fontBBox is not None: - self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox) - if ttFont.recalcTimestamp: - self.modified = timestampNow() - data = sstruct.pack(headFormat, self) - return data - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - _, names, fixes = sstruct.getformat(headFormat) - for name in names: - value = getattr(self, name) - if name in fixes: - value = floatToFixedToStr(value, precisionBits=fixes[name]) - elif name in ("created", "modified"): - value = timestampToString(value) - elif name in ("magicNumber", "checkSumAdjustment"): - if value < 0: - value = value + 0x100000000 - value = hex(value) - if value[-1:] == "L": - value = value[:-1] - elif name in ("macStyle", "flags"): - value = num2binary(value, 16) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - fixes = sstruct.getformat(headFormat)[2] - if name in fixes: - value = strToFixedToFloat(value, precisionBits=fixes[name]) - elif name in ("created", "modified"): - value = timestampFromString(value) - elif name in ("macStyle", "flags"): - value = binary2num(value) - else: - value = safeEval(value) - setattr(self, name, value) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/ranged_response.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/ranged_response.py deleted file mode 100644 index 88eb696184e56f683f8feabbf895a1bd6346a667..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/ranged_response.py +++ /dev/null @@ -1,185 +0,0 @@ -# Taken from https://gist.github.com/kevinastone/a6a62db57577b3f24e8a6865ed311463 -# Context: https://github.com/encode/starlette/pull/1090 -from __future__ import annotations - -import os -import re -import stat -from typing import NamedTuple -from urllib.parse import quote - -import aiofiles -from aiofiles.os import stat as aio_stat -from starlette.datastructures import Headers -from starlette.exceptions import HTTPException -from starlette.responses import Response, guess_type -from starlette.staticfiles import StaticFiles -from starlette.types import Receive, Scope, Send - -RANGE_REGEX = re.compile(r"^bytes=(?P\d+)-(?P\d*)$") - - -class ClosedRange(NamedTuple): - start: int - end: int - - def __len__(self) -> int: - return self.end - self.start + 1 - - def __bool__(self) -> bool: - return len(self) > 0 - - -class OpenRange(NamedTuple): - start: int - end: int | None = None - - def clamp(self, start: int, end: int) -> ClosedRange: - begin = max(self.start, start) - end = min(x for x in (self.end, end) if x) - - begin = min(begin, end) - end = max(begin, end) - - return ClosedRange(begin, end) - - -class RangedFileResponse(Response): - chunk_size = 4096 - - def __init__( - self, - path: str | os.PathLike, - range: OpenRange, - headers: dict[str, str] | None = None, - media_type: str | None = None, - filename: str | None = None, - 
stat_result: os.stat_result | None = None, - method: str | None = None, - ) -> None: - assert aiofiles is not None, "'aiofiles' must be installed to use FileResponse" - self.path = path - self.range = range - self.filename = filename - self.background = None - self.send_header_only = method is not None and method.upper() == "HEAD" - if media_type is None: - media_type = guess_type(filename or path)[0] or "text/plain" - self.media_type = media_type - self.init_headers(headers or {}) - if self.filename is not None: - content_disposition_filename = quote(self.filename) - if content_disposition_filename != self.filename: - content_disposition = ( - f"attachment; filename*=utf-8''{content_disposition_filename}" - ) - else: - content_disposition = f'attachment; filename="{self.filename}"' - self.headers.setdefault("content-disposition", content_disposition) - self.stat_result = stat_result - - def set_range_headers(self, range: ClosedRange) -> None: - assert self.stat_result - total_length = self.stat_result.st_size - content_length = len(range) - self.headers[ - "content-range" - ] = f"bytes {range.start}-{range.end}/{total_length}" - self.headers["content-length"] = str(content_length) - pass - - async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: - if self.stat_result is None: - try: - stat_result = await aio_stat(self.path) - self.stat_result = stat_result - except FileNotFoundError as fnfe: - raise RuntimeError( - f"File at path {self.path} does not exist." - ) from fnfe - else: - mode = stat_result.st_mode - if not stat.S_ISREG(mode): - raise RuntimeError(f"File at path {self.path} is not a file.") - - byte_range = self.range.clamp(0, self.stat_result.st_size) - self.set_range_headers(byte_range) - - async with aiofiles.open(self.path, mode="rb") as file: - await file.seek(byte_range.start) - await send( - { - "type": "http.response.start", - "status": 206, - "headers": self.raw_headers, - } - ) - if self.send_header_only: - await send( - {"type": "http.response.body", "body": b"", "more_body": False} - ) - else: - remaining_bytes = len(byte_range) - - if not byte_range: - await send( - {"type": "http.response.body", "body": b"", "more_body": False} - ) - return - - while remaining_bytes > 0: - chunk_size = min(self.chunk_size, remaining_bytes) - chunk = await file.read(chunk_size) - remaining_bytes -= len(chunk) - await send( - { - "type": "http.response.body", - "body": chunk, - "more_body": remaining_bytes > 0, - } - ) - - -class RangedStaticFiles(StaticFiles): - def file_response( - self, - full_path: str | os.PathLike, - stat_result: os.stat_result, - scope: Scope, - status_code: int = 200, - ) -> Response: - request_headers = Headers(scope=scope) - - if request_headers.get("range"): - response = self.ranged_file_response( - full_path, stat_result=stat_result, scope=scope - ) - else: - response = super().file_response( - full_path, stat_result=stat_result, scope=scope, status_code=status_code - ) - response.headers["accept-ranges"] = "bytes" - return response - - def ranged_file_response( - self, - full_path: str | os.PathLike, - stat_result: os.stat_result, - scope: Scope, - ) -> Response: - method = scope["method"] - request_headers = Headers(scope=scope) - - range_header = request_headers["range"] - - match = RANGE_REGEX.search(range_header) - if not match: - raise HTTPException(400) - - start, end = match.group("start"), match.group("end") - - range = OpenRange(int(start), int(end) if end else None) - - return RangedFileResponse( - full_path, range, 
stat_result=stat_result, method=method - ) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Copy-9f1657c4.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Copy-9f1657c4.js deleted file mode 100644 index 08722d64cdfc2bef38a66c6af2894238dc6ef286..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Copy-9f1657c4.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as p,e as c,s as h,J as a,K as e,p as u,M as i,n as o,A as d}from"./index-1d65707a.js";function v(l){let t,s;return{c(){t=a("svg"),s=a("polyline"),e(s,"points","20 6 9 17 4 12"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 24 24"),e(t,"fill","none"),e(t,"stroke","currentColor"),e(t,"stroke-width","3"),e(t,"stroke-linecap","round"),e(t,"stroke-linejoin","round")},m(n,r){u(n,t,r),i(t,s)},p:o,i:o,o,d(n){n&&d(t)}}}class m extends p{constructor(t){super(),c(this,t,null,v,h,{})}}function w(l){let t,s,n;return{c(){t=a("svg"),s=a("path"),n=a("path"),e(s,"fill","currentColor"),e(s,"d","M28 10v18H10V10h18m0-2H10a2 2 0 0 0-2 2v18a2 2 0 0 0 2 2h18a2 2 0 0 0 2-2V10a2 2 0 0 0-2-2Z"),e(n,"fill","currentColor"),e(n,"d","M4 18H2V4a2 2 0 0 1 2-2h14v2H4Z"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 32 32")},m(r,g){u(r,t,g),i(t,s),i(t,n)},p:o,i:o,o,d(r){r&&d(t)}}}class x extends p{constructor(t){super(),c(this,t,null,w,h,{})}}export{x as C,m as a}; -//# sourceMappingURL=Copy-9f1657c4.js.map diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/index.html b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/index.html deleted file mode 100644 index 2aa4ca9721b9f4f299b97b324cdf0eb9fc1111e0..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/index.html +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/__init__.py deleted file mode 100644 index 989e92c3458681a6f0be72ae4105ea742750d328..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230), -# containing no networking code at all, loosely modelled on hyper-h2's generic -# implementation of HTTP/2 (and in particular the h2.connection.H2Connection -# class). There's still a bunch of subtle details you need to get right if you -# want to make this actually useful, because it doesn't implement all the -# semantics to check that what you're asking to write to the wire is sensible, -# but at least it gets you out of dealing with the wire itself. 
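# A minimal usage sketch of the sans-IO style described above, using only the
# names re-exported by this package: h11 never touches a socket, it just turns
# events into bytes (and, on the receiving side, bytes back into events).
import h11

conn = h11.Connection(our_role=h11.CLIENT)
wire_data = conn.send(h11.Request(method="GET", target="/", headers=[("Host", "example.com")]))
wire_data += conn.send(h11.EndOfMessage())
# `wire_data` now holds a complete HTTP/1.1 GET request, ready to be written to a socket.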
- -from h11._connection import Connection, NEED_DATA, PAUSED -from h11._events import ( - ConnectionClosed, - Data, - EndOfMessage, - Event, - InformationalResponse, - Request, - Response, -) -from h11._state import ( - CLIENT, - CLOSED, - DONE, - ERROR, - IDLE, - MIGHT_SWITCH_PROTOCOL, - MUST_CLOSE, - SEND_BODY, - SEND_RESPONSE, - SERVER, - SWITCHED_PROTOCOL, -) -from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError -from h11._version import __version__ - -PRODUCT_ID = "python-h11/" + __version__ - - -__all__ = ( - "Connection", - "NEED_DATA", - "PAUSED", - "ConnectionClosed", - "Data", - "EndOfMessage", - "Event", - "InformationalResponse", - "Request", - "Response", - "CLIENT", - "CLOSED", - "DONE", - "ERROR", - "IDLE", - "MUST_CLOSE", - "SEND_BODY", - "SEND_RESPONSE", - "SERVER", - "SWITCHED_PROTOCOL", - "ProtocolError", - "LocalProtocolError", - "RemoteProtocolError", -) diff --git a/spaces/Datasculptor/AIart_sources_of_inspiration/README.md b/spaces/Datasculptor/AIart_sources_of_inspiration/README.md deleted file mode 100644 index 48228697075230cede0268cf8a50d51a81bb81bb..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/AIart_sources_of_inspiration/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Identifying Painting Authors -emoji: 🎨 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -duplicated_from: Datasculptor/Predicting_Authors ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EAraid12/LoRA-DreamBooth-Training-UI/utils.py b/spaces/EAraid12/LoRA-DreamBooth-Training-UI/utils.py deleted file mode 100644 index 8fe82394db3a576d0b8bb94788cdc313a1b44392..0000000000000000000000000000000000000000 --- a/spaces/EAraid12/LoRA-DreamBooth-Training-UI/utils.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -import pathlib - - -def find_exp_dirs(ignore_repo: bool = False) -> list[str]: - repo_dir = pathlib.Path(__file__).parent - exp_root_dir = repo_dir / 'experiments' - if not exp_root_dir.exists(): - return [] - exp_dirs = sorted(exp_root_dir.glob('*')) - exp_dirs = [ - exp_dir for exp_dir in exp_dirs - if (exp_dir / 'pytorch_lora_weights.bin').exists() - ] - if ignore_repo: - exp_dirs = [ - exp_dir for exp_dir in exp_dirs if not (exp_dir / '.git').exists() - ] - return [path.relative_to(repo_dir).as_posix() for path in exp_dirs] - - -def save_model_card( - save_dir: pathlib.Path, - base_model: str, - instance_prompt: str, - test_prompt: str = '', - test_image_dir: str = '', -) -> None: - image_str = '' - if test_prompt and test_image_dir: - image_paths = sorted((save_dir / test_image_dir).glob('*')) - if image_paths: - image_str = f'Test prompt: {test_prompt}\n' - for image_path in image_paths: - rel_path = image_path.relative_to(save_dir) - image_str += f'![{image_path.stem}]({rel_path})\n' - - model_card = f'''--- -license: creativeml-openrail-m -base_model: {base_model} -instance_prompt: {instance_prompt} -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -- lora -inference: true ---- -# LoRA DreamBooth - {save_dir.name} - -These are LoRA adaption weights for [{base_model}](https://huggingface.co/{base_model}). The weights were trained on the instance prompt "{instance_prompt}" using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. 
- -{image_str} -''' - - with open(save_dir / 'README.md', 'w') as f: - f.write(model_card) diff --git a/spaces/ECCV2022/bytetrack/yolox/tracker/kalman_filter.py b/spaces/ECCV2022/bytetrack/yolox/tracker/kalman_filter.py deleted file mode 100644 index deda8a26292b81bc6512a8f6145afabde6c16d7a..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/yolox/tracker/kalman_filter.py +++ /dev/null @@ -1,270 +0,0 @@ -# vim: expandtab:ts=4:sw=4 -import numpy as np -import scipy.linalg - - -""" -Table for the 0.95 quantile of the chi-square distribution with N degrees of -freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv -function and used as Mahalanobis gating threshold. -""" -chi2inv95 = { - 1: 3.8415, - 2: 5.9915, - 3: 7.8147, - 4: 9.4877, - 5: 11.070, - 6: 12.592, - 7: 14.067, - 8: 15.507, - 9: 16.919} - - -class KalmanFilter(object): - """ - A simple Kalman filter for tracking bounding boxes in image space. - - The 8-dimensional state space - - x, y, a, h, vx, vy, va, vh - - contains the bounding box center position (x, y), aspect ratio a, height h, - and their respective velocities. - - Object motion follows a constant velocity model. The bounding box location - (x, y, a, h) is taken as direct observation of the state space (linear - observation model). - - """ - - def __init__(self): - ndim, dt = 4, 1. - - # Create Kalman filter model matrices. - self._motion_mat = np.eye(2 * ndim, 2 * ndim) - for i in range(ndim): - self._motion_mat[i, ndim + i] = dt - self._update_mat = np.eye(ndim, 2 * ndim) - - # Motion and observation uncertainty are chosen relative to the current - # state estimate. These weights control the amount of uncertainty in - # the model. This is a bit hacky. - self._std_weight_position = 1. / 20 - self._std_weight_velocity = 1. / 160 - - def initiate(self, measurement): - """Create track from unassociated measurement. - - Parameters - ---------- - measurement : ndarray - Bounding box coordinates (x, y, a, h) with center position (x, y), - aspect ratio a, and height h. - - Returns - ------- - (ndarray, ndarray) - Returns the mean vector (8 dimensional) and covariance matrix (8x8 - dimensional) of the new track. Unobserved velocities are initialized - to 0 mean. - - """ - mean_pos = measurement - mean_vel = np.zeros_like(mean_pos) - mean = np.r_[mean_pos, mean_vel] - - std = [ - 2 * self._std_weight_position * measurement[3], - 2 * self._std_weight_position * measurement[3], - 1e-2, - 2 * self._std_weight_position * measurement[3], - 10 * self._std_weight_velocity * measurement[3], - 10 * self._std_weight_velocity * measurement[3], - 1e-5, - 10 * self._std_weight_velocity * measurement[3]] - covariance = np.diag(np.square(std)) - return mean, covariance - - def predict(self, mean, covariance): - """Run Kalman filter prediction step. - - Parameters - ---------- - mean : ndarray - The 8 dimensional mean vector of the object state at the previous - time step. - covariance : ndarray - The 8x8 dimensional covariance matrix of the object state at the - previous time step. - - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted - state. Unobserved velocities are initialized to 0 mean. 
- - """ - std_pos = [ - self._std_weight_position * mean[3], - self._std_weight_position * mean[3], - 1e-2, - self._std_weight_position * mean[3]] - std_vel = [ - self._std_weight_velocity * mean[3], - self._std_weight_velocity * mean[3], - 1e-5, - self._std_weight_velocity * mean[3]] - motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) - - #mean = np.dot(self._motion_mat, mean) - mean = np.dot(mean, self._motion_mat.T) - covariance = np.linalg.multi_dot(( - self._motion_mat, covariance, self._motion_mat.T)) + motion_cov - - return mean, covariance - - def project(self, mean, covariance): - """Project state distribution to measurement space. - - Parameters - ---------- - mean : ndarray - The state's mean vector (8 dimensional array). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - - Returns - ------- - (ndarray, ndarray) - Returns the projected mean and covariance matrix of the given state - estimate. - - """ - std = [ - self._std_weight_position * mean[3], - self._std_weight_position * mean[3], - 1e-1, - self._std_weight_position * mean[3]] - innovation_cov = np.diag(np.square(std)) - - mean = np.dot(self._update_mat, mean) - covariance = np.linalg.multi_dot(( - self._update_mat, covariance, self._update_mat.T)) - return mean, covariance + innovation_cov - - def multi_predict(self, mean, covariance): - """Run Kalman filter prediction step (Vectorized version). - Parameters - ---------- - mean : ndarray - The Nx8 dimensional mean matrix of the object states at the previous - time step. - covariance : ndarray - The Nx8x8 dimensional covariance matrics of the object states at the - previous time step. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted - state. Unobserved velocities are initialized to 0 mean. - """ - std_pos = [ - self._std_weight_position * mean[:, 3], - self._std_weight_position * mean[:, 3], - 1e-2 * np.ones_like(mean[:, 3]), - self._std_weight_position * mean[:, 3]] - std_vel = [ - self._std_weight_velocity * mean[:, 3], - self._std_weight_velocity * mean[:, 3], - 1e-5 * np.ones_like(mean[:, 3]), - self._std_weight_velocity * mean[:, 3]] - sqr = np.square(np.r_[std_pos, std_vel]).T - - motion_cov = [] - for i in range(len(mean)): - motion_cov.append(np.diag(sqr[i])) - motion_cov = np.asarray(motion_cov) - - mean = np.dot(mean, self._motion_mat.T) - left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) - covariance = np.dot(left, self._motion_mat.T) + motion_cov - - return mean, covariance - - def update(self, mean, covariance, measurement): - """Run Kalman filter correction step. - - Parameters - ---------- - mean : ndarray - The predicted state's mean vector (8 dimensional). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - measurement : ndarray - The 4 dimensional measurement vector (x, y, a, h), where (x, y) - is the center position, a the aspect ratio, and h the height of the - bounding box. - - Returns - ------- - (ndarray, ndarray) - Returns the measurement-corrected state distribution. 
- - """ - projected_mean, projected_cov = self.project(mean, covariance) - - chol_factor, lower = scipy.linalg.cho_factor( - projected_cov, lower=True, check_finite=False) - kalman_gain = scipy.linalg.cho_solve( - (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, - check_finite=False).T - innovation = measurement - projected_mean - - new_mean = mean + np.dot(innovation, kalman_gain.T) - new_covariance = covariance - np.linalg.multi_dot(( - kalman_gain, projected_cov, kalman_gain.T)) - return new_mean, new_covariance - - def gating_distance(self, mean, covariance, measurements, - only_position=False, metric='maha'): - """Compute gating distance between state distribution and measurements. - A suitable distance threshold can be obtained from `chi2inv95`. If - `only_position` is False, the chi-square distribution has 4 degrees of - freedom, otherwise 2. - Parameters - ---------- - mean : ndarray - Mean vector over the state distribution (8 dimensional). - covariance : ndarray - Covariance of the state distribution (8x8 dimensional). - measurements : ndarray - An Nx4 dimensional matrix of N measurements, each in - format (x, y, a, h) where (x, y) is the bounding box center - position, a the aspect ratio, and h the height. - only_position : Optional[bool] - If True, distance computation is done with respect to the bounding - box center position only. - Returns - ------- - ndarray - Returns an array of length N, where the i-th element contains the - squared Mahalanobis distance between (mean, covariance) and - `measurements[i]`. - """ - mean, covariance = self.project(mean, covariance) - if only_position: - mean, covariance = mean[:2], covariance[:2, :2] - measurements = measurements[:, :2] - - d = measurements - mean - if metric == 'gaussian': - return np.sum(d * d, axis=1) - elif metric == 'maha': - cholesky_factor = np.linalg.cholesky(covariance) - z = scipy.linalg.solve_triangular( - cholesky_factor, d.T, lower=True, check_finite=False, - overwrite_b=True) - squared_maha = np.sum(z * z, axis=0) - return squared_maha - else: - raise ValueError('invalid distance metric') \ No newline at end of file diff --git a/spaces/Eightone3D/anything-v3.0/app.py b/spaces/Eightone3D/anything-v3.0/app.py deleted file mode 100644 index 99a6a3762d5e337f08e960c4a31b4ac2467bca49..0000000000000000000000000000000000000000 --- a/spaces/Eightone3D/anything-v3.0/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr - -description = """
- -
- """ - -gr.Interface.load("models/Linaqruf/anything-v3.0", description=description).launch() \ No newline at end of file diff --git a/spaces/ElainaFanBoy/MusicGen/tests/modules/test_seanet.py b/spaces/ElainaFanBoy/MusicGen/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/ElainaFanBoy/MusicGen/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, 
SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/Elbhnasy/Eye-Tracking-Diagnosis/model.py b/spaces/Elbhnasy/Eye-Tracking-Diagnosis/model.py deleted file mode 100644 index 8fda72c5f813f93075629962abeb737d0e3ea4d7..0000000000000000000000000000000000000000 --- a/spaces/Elbhnasy/Eye-Tracking-Diagnosis/model.py +++ /dev/null @@ -1,37 +0,0 @@ -import torch -import torchvision -from torchvision import transforms -from torch import nn - - -def create_ResNetb34_model(num_classes:int=3,seed:int=42): - """ - Creates an ResNetb34 feature extractor model and transforms. - :param num_classes: number of classes in classifier head. - Defaults to 3. - :param seed: random seed value. - Defaults to 42. - :return: feature extractor model. - transforms (torchvision.transforms): ResNetb34 image transforms. - """ - # 1. Setup pretrained EffNetB1 weights - weigts = torchvision.models.ResNet34_Weights.DEFAULT - # 2. Get EffNetB2 transforms - transform = transforms.Compose([ - weigts.transforms(), - - #transforms.RandomHorizontalFlip(), - ]) - # 3. Setup pretrained model - model=torchvision.models.resnet34(weights= "DEFAULT") - - # 4. Freeze the base layers in the model (this will freeze all layers to begin with) - for param in model.parameters(): - param.requires_grad=True - - # 5. Change classifier head with random seed for reproducibility - torch.manual_seed(seed) - model.classifier=nn.Sequential(nn.Dropout(p=0.2,inplace=True), - nn.Linear(in_features=612,out_features=num_classes)) - return model,transform - diff --git a/spaces/EricA1/openjourney/app.py b/spaces/EricA1/openjourney/app.py deleted file mode 100644 index bea4accb45793c8e748731c184dee0ffaf509dd5..0000000000000000000000000000000000000000 --- a/spaces/EricA1/openjourney/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr - -description = """
- -
- """ - -gr.Interface.load("models/prompthero/openjourney", description=description).launch() \ No newline at end of file diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/registry.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/registry.py deleted file mode 100644 index 655753b3b9cbd0cfe73fe93a77cf1fcc3db6d827..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/registry.py +++ /dev/null @@ -1,82 +0,0 @@ -# Modified from: https://github.com/facebookresearch/fvcore/blob/master/fvcore/common/registry.py # noqa: E501 - - -class Registry(): - """ - The registry that provides name -> object mapping, to support third-party - users' custom modules. - - To create a registry (e.g. a backbone registry): - - .. code-block:: python - - BACKBONE_REGISTRY = Registry('BACKBONE') - - To register an object: - - .. code-block:: python - - @BACKBONE_REGISTRY.register() - class MyBackbone(): - ... - - Or: - - .. code-block:: python - - BACKBONE_REGISTRY.register(MyBackbone) - """ - - def __init__(self, name): - """ - Args: - name (str): the name of this registry - """ - self._name = name - self._obj_map = {} - - def _do_register(self, name, obj): - assert (name not in self._obj_map), (f"An object named '{name}' was already registered " - f"in '{self._name}' registry!") - self._obj_map[name] = obj - - def register(self, obj=None): - """ - Register the given object under the the name `obj.__name__`. - Can be used as either a decorator or not. - See docstring of this class for usage. - """ - if obj is None: - # used as a decorator - def deco(func_or_class): - name = func_or_class.__name__ - self._do_register(name, func_or_class) - return func_or_class - - return deco - - # used as a function call - name = obj.__name__ - self._do_register(name, obj) - - def get(self, name): - ret = self._obj_map.get(name) - if ret is None: - raise KeyError(f"No object named '{name}' found in '{self._name}' registry!") - return ret - - def __contains__(self, name): - return name in self._obj_map - - def __iter__(self): - return iter(self._obj_map.items()) - - def keys(self): - return self._obj_map.keys() - - -DATASET_REGISTRY = Registry('dataset') -ARCH_REGISTRY = Registry('arch') -MODEL_REGISTRY = Registry('model') -LOSS_REGISTRY = Registry('loss') -METRIC_REGISTRY = Registry('metric') diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/modules/modules.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/modules/modules.py deleted file mode 100644 index 54290fd207b25e93831bd21005990ea137e6b50e..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-pcr/modules/modules.py +++ /dev/null @@ -1,342 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import modules.commons as commons -from modules.commons import init_weights, get_padding - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, 
hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = 
torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, 
x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x diff --git a/spaces/GigiWasThere/Text/app.py b/spaces/GigiWasThere/Text/app.py deleted file mode 100644 index 94f075abc04d9c180ae808b3a7e01017400c9d7c..0000000000000000000000000000000000000000 --- a/spaces/GigiWasThere/Text/app.py +++ /dev/null @@ -1,12 +0,0 @@ -import gradio as gr -from gradio.mix import Parallel - -title="My First Text Generator" -descripition="Input Text." 
- - -model1=gr.Interface.load("huggingface/EleutherAI/gpt-j-6B") -model2=gr.Interface.load("huggingface/gpt2") -model3=gr.Interface.load("huggingface/EleutherAI/gpt-neo-125M") - -gr.Parallel(model1, model2, model3,title=title,descripition=descripition).launch() diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/utils/model_utils.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/utils/model_utils.py deleted file mode 100644 index e51e95578f72b3218d6d832e3b604193cb68c1d7..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/utils/model_utils.py +++ /dev/null @@ -1,35 +0,0 @@ -import torch -import argparse -from models.psp import pSp -from models.encoders.psp_encoders import Encoder4Editing - - -def setup_model(checkpoint_path, device='cuda'): - ckpt = torch.load(checkpoint_path, map_location='cpu') - opts = ckpt['opts'] - - opts['checkpoint_path'] = checkpoint_path - opts['device'] = device - opts = argparse.Namespace(**opts) - - net = pSp(opts) - net.eval() - net = net.to(device) - return net, opts - - -def load_e4e_standalone(checkpoint_path, device='cuda'): - ckpt = torch.load(checkpoint_path, map_location='cpu') - opts = argparse.Namespace(**ckpt['opts']) - e4e = Encoder4Editing(50, 'ir_se', opts) - e4e_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')} - e4e.load_state_dict(e4e_dict) - e4e.eval() - e4e = e4e.to(device) - latent_avg = ckpt['latent_avg'].to(device) - - def add_latent_avg(model, inputs, outputs): - return outputs + latent_avg.repeat(outputs.shape[0], 1, 1) - - e4e.register_forward_hook(add_latent_avg) - return e4e diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 8e36c9b3a506eacd97bfadee8d167886eef74cb7..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 5e04aa7c6ac050d119e07b715e2082f692e1a1de..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 6804a5781369d1031f179d421a3b5a160fd575d3..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' -model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index e6d58a67b3b4dddf3da42efca30fa599e623f183..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/fad.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/fad.py deleted file mode 100644 index de66138dbb14fd4246bbfe590bddfd5beaf1ed8c..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/fad.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from pathlib import Path -import os -import subprocess -import tempfile -import typing as tp - -from audiocraft.data.audio import audio_write -from audiocraft.data.audio_utils import convert_audio -import flashy -import torch -import torchmetrics - -from ..environment import AudioCraftEnvironment - - -logger = logging.getLogger(__name__) - -VGGISH_SAMPLE_RATE = 16_000 -VGGISH_CHANNELS = 1 - - -class FrechetAudioDistanceMetric(torchmetrics.Metric): - """Fréchet Audio Distance computation based on official TensorFlow implementation from Google Research. - - From: D.C. Dowson & B.V. Landau The Fréchet distance between - multivariate normal distributions - https://doi.org/10.1016/0047-259X(82)90077-X - The Fréchet distance between two multivariate gaussians, - `X ~ N(mu_x, sigma_x)` and `Y ~ N(mu_y, sigma_y)`, is `d^2`. - d^2 = (mu_x - mu_y)^2 + Tr(sigma_x + sigma_y - 2 * sqrt(sigma_x*sigma_y)) - = (mu_x - mu_y)^2 + Tr(sigma_x) + Tr(sigma_y) - - 2 * Tr(sqrt(sigma_x*sigma_y))) - - To use this FAD computation metric, you need to have the proper Frechet Audio Distance tool setup - from: https://github.com/google-research/google-research/tree/master/frechet_audio_distance - We provide the below instructions as reference but we do not guarantee for further support - in frechet_audio_distance installation. This was tested with python 3.10, cuda 11.8, tensorflow 2.12.0. - - We recommend installing the frechet_audio_distance library in a dedicated env (e.g. conda). - - 1. Get the code and models following the repository instructions. We used the steps below: - git clone git@github.com:google-research/google-research.git - git clone git@github.com:tensorflow/models.git - mkdir google-research/tensorflow_models - touch google-research/tensorflow_models/__init__.py - cp -r models/research/audioset google-research/tensorflow_models/ - touch google-research/tensorflow_models/audioset/__init__.py - echo "from .vggish import mel_features, vggish_params, vggish_slim" > \ - google-research/tensorflow_models/audioset/__init__.py - # we can now remove the tensorflow models repository - # rm -r models - cd google-research - Follow the instructions to download the vggish checkpoint. 
AudioCraft base configuration - assumes it is placed in the AudioCraft reference dir. - - Note that we operate the following changes for the code to work with TensorFlow 2.X and python 3: - - Update xrange for range in: - https://github.com/google-research/google-research/blob/master/frechet_audio_distance/audioset_model.py - - Update `tf_record = tf.python_io.tf_record_iterator(filename).next()` to - `tf_record = tf.python_io.tf_record_iterator(filename).__next__()` in - https://github.com/google-research/google-research/blob/master/frechet_audio_distance/fad_utils.py - - Update `import vggish_params as params` to `from . import vggish_params as params` in: - https://github.com/tensorflow/models/blob/master/research/audioset/vggish/vggish_slim.py - - Add flag to provide a given batch size for running the AudioSet model in: - https://github.com/google-research/google-research/blob/master/frechet_audio_distance/create_embeddings_main.py - ``` - flags.DEFINE_integer('batch_size', 64, - 'Number of samples in the batch for AudioSet model.') - ``` - Ensure you pass the flag to the create_embeddings_beam.create_pipeline function, adding: - `batch_size=FLAGS.batch_size` to the provided parameters. - - 2. Follow instructions for the library installation and a valid TensorFlow installation - ``` - # e.g. instructions from: https://www.tensorflow.org/install/pip - conda install -c conda-forge cudatoolkit=11.8.0 - python3 -m pip install nvidia-cudnn-cu11==8.6.0.163 tensorflow==2.12.* - mkdir -p $CONDA_PREFIX/etc/conda/activate.d - echo 'CUDNN_PATH=$(dirname $(python -c "import nvidia.cudnn;print(nvidia.cudnn.__file__)"))' \ - >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/:$CUDNN_PATH/lib' \ - >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - source $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - # Verify install: on a machine with GPU device - python3 -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))" - ``` - - Now install frechet_audio_distance required dependencies: - ``` - # We assume we already have TensorFlow installed from the above steps - pip install apache-beam numpy scipy tf_slim - ``` - - Finally, follow remaining library instructions to ensure you have a working frechet_audio_distance setup - (you may want to specify --model_ckpt flag pointing to the model's path). - - 3. AudioCraft's FrechetAudioDistanceMetric requires 2 environment variables pointing to the python executable - and Tensorflow library path from the above installation steps: - export TF_PYTHON_EXE="" - export TF_LIBRARY_PATH="" - - e.g. assuming we have installed everything in a dedicated conda env - with python 3.10 that is currently active: - export TF_PYTHON_EXE="$CONDA_PREFIX/bin/python" - export TF_LIBRARY_PATH="$CONDA_PREFIX/lib/python3.10/site-packages/nvidia/cudnn/lib" - - Finally you may want to export the following variable: - export TF_FORCE_GPU_ALLOW_GROWTH=true - See: https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth - - You can save those environment variables in your training conda env, when currently active: - `$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh` - e.g. 
assuming the env with TensorFlow and frechet_audio_distance install is named ac_eval, - and the training conda env is named audiocraft: - ``` - # activate training env - conda activate audiocraft - # get path to all envs - CONDA_ENV_DIR=$(dirname $CONDA_PREFIX) - # export pointers to evaluation env for using TensorFlow in FrechetAudioDistanceMetric - touch $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - echo 'export TF_PYTHON_EXE="$CONDA_ENV_DIR/ac_eval/bin/python"' >> \ - $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - echo 'export TF_LIBRARY_PATH="$CONDA_ENV_DIR/ac_eval/lib/python3.10/site-packages/nvidia/cudnn/lib"' >> \ - $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - # optionally: - echo 'export TF_FORCE_GPU_ALLOW_GROWTH=true' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - # you may need to reactivate the audiocraft env for this to take effect - ``` - - Args: - bin (Path or str): Path to installed frechet audio distance code. - model_path (Path or str): Path to Tensorflow checkpoint for the model - used to compute statistics over the embedding beams. - format (str): Audio format used to save files. - log_folder (Path or str, optional): Path where to write process logs. - """ - def __init__(self, bin: tp.Union[Path, str], model_path: tp.Union[Path, str], - format: str = "wav", batch_size: tp.Optional[int] = None, - log_folder: tp.Optional[tp.Union[Path, str]] = None): - super().__init__() - self.model_sample_rate = VGGISH_SAMPLE_RATE - self.model_channels = VGGISH_CHANNELS - self.model_path = AudioCraftEnvironment.resolve_reference_path(model_path) - assert Path(self.model_path).exists(), f"Could not find provided model checkpoint path at: {self.model_path}" - self.format = format - self.batch_size = batch_size - self.bin = bin - self.tf_env = {"PYTHONPATH": str(self.bin)} - self.python_path = os.environ.get('TF_PYTHON_EXE') or 'python' - logger.info("Python exe for TF is %s", self.python_path) - if 'TF_LIBRARY_PATH' in os.environ: - self.tf_env['LD_LIBRARY_PATH'] = os.environ['TF_LIBRARY_PATH'] - if 'TF_FORCE_GPU_ALLOW_GROWTH' in os.environ: - self.tf_env['TF_FORCE_GPU_ALLOW_GROWTH'] = os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] - logger.info("Env for TF is %r", self.tf_env) - self.reset(log_folder) - self.add_state("total_files", default=torch.tensor(0.), dist_reduce_fx="sum") - - def reset(self, log_folder: tp.Optional[tp.Union[Path, str]] = None): - """Reset torchmetrics.Metrics state.""" - log_folder = Path(log_folder or tempfile.mkdtemp()) - self.tmp_dir = log_folder / 'fad' - self.tmp_dir.mkdir(exist_ok=True) - self.samples_tests_dir = self.tmp_dir / 'tests' - self.samples_tests_dir.mkdir(exist_ok=True) - self.samples_background_dir = self.tmp_dir / 'background' - self.samples_background_dir.mkdir(exist_ok=True) - self.manifest_tests = self.tmp_dir / 'files_tests.cvs' - self.manifest_background = self.tmp_dir / 'files_background.cvs' - self.stats_tests_dir = self.tmp_dir / 'stats_tests' - self.stats_background_dir = self.tmp_dir / 'stats_background' - self.counter = 0 - - def update(self, preds: torch.Tensor, targets: torch.Tensor, - sizes: torch.Tensor, sample_rates: torch.Tensor, - stems: tp.Optional[tp.List[str]] = None): - """Update torchmetrics.Metrics by saving the audio and updating the manifest file.""" - assert preds.shape == targets.shape, f"preds={preds.shape} != targets={targets.shape}" - num_samples = preds.shape[0] - assert num_samples == sizes.size(0) and num_samples == sample_rates.size(0) - assert stems is None or num_samples == len(set(stems)) - for i in 
range(num_samples): - self.total_files += 1 # type: ignore - self.counter += 1 - wav_len = int(sizes[i].item()) - sample_rate = int(sample_rates[i].item()) - pred_wav = preds[i] - target_wav = targets[i] - pred_wav = pred_wav[..., :wav_len] - target_wav = target_wav[..., :wav_len] - stem_name = stems[i] if stems is not None else f'sample_{self.counter}_{flashy.distrib.rank()}' - # dump audio files - try: - pred_wav = convert_audio( - pred_wav.unsqueeze(0), from_rate=sample_rate, - to_rate=self.model_sample_rate, to_channels=1).squeeze(0) - audio_write( - self.samples_tests_dir / stem_name, pred_wav, sample_rate=self.model_sample_rate, - format=self.format, strategy="peak") - except Exception as e: - logger.error(f"Exception occured when saving tests files for FAD computation: {repr(e)} - {e}") - try: - # for the ground truth audio, we enforce the 'peak' strategy to avoid modifying - # the original audio when writing it - target_wav = convert_audio( - target_wav.unsqueeze(0), from_rate=sample_rate, - to_rate=self.model_sample_rate, to_channels=1).squeeze(0) - audio_write( - self.samples_background_dir / stem_name, target_wav, sample_rate=self.model_sample_rate, - format=self.format, strategy="peak") - except Exception as e: - logger.error(f"Exception occured when saving background files for FAD computation: {repr(e)} - {e}") - - def _get_samples_name(self, is_background: bool): - return 'background' if is_background else 'tests' - - def _create_embedding_beams(self, is_background: bool, gpu_index: tp.Optional[int] = None): - if is_background: - input_samples_dir = self.samples_background_dir - input_filename = self.manifest_background - stats_name = self.stats_background_dir - else: - input_samples_dir = self.samples_tests_dir - input_filename = self.manifest_tests - stats_name = self.stats_tests_dir - beams_name = self._get_samples_name(is_background) - log_file = self.tmp_dir / f'fad_logs_create_beams_{beams_name}.log' - - logger.info(f"Scanning samples folder to fetch list of files: {input_samples_dir}") - with open(input_filename, "w") as fout: - for path in Path(input_samples_dir).glob(f"*.{self.format}"): - fout.write(f"{str(path)}\n") - - cmd = [ - self.python_path, "-m", - "frechet_audio_distance.create_embeddings_main", - "--model_ckpt", f"{self.model_path}", - "--input_files", f"{str(input_filename)}", - "--stats", f"{str(stats_name)}", - ] - if self.batch_size is not None: - cmd += ["--batch_size", str(self.batch_size)] - logger.info(f"Launching frechet_audio_distance embeddings main method: {' '.join(cmd)} on {beams_name}") - env = os.environ - if gpu_index is not None: - env["CUDA_VISIBLE_DEVICES"] = str(gpu_index) - process = subprocess.Popen( - cmd, stdout=open(log_file, "w"), env={**env, **self.tf_env}, stderr=subprocess.STDOUT) - return process, log_file - - def _compute_fad_score(self, gpu_index: tp.Optional[int] = None): - cmd = [ - self.python_path, "-m", "frechet_audio_distance.compute_fad", - "--test_stats", f"{str(self.stats_tests_dir)}", - "--background_stats", f"{str(self.stats_background_dir)}", - ] - logger.info(f"Launching frechet_audio_distance compute fad method: {' '.join(cmd)}") - env = os.environ - if gpu_index is not None: - env["CUDA_VISIBLE_DEVICES"] = str(gpu_index) - result = subprocess.run(cmd, env={**env, **self.tf_env}, capture_output=True) - if result.returncode: - logger.error( - "Error with FAD computation from stats: \n %s \n %s", - result.stdout.decode(), result.stderr.decode() - ) - raise RuntimeError("Error while executing FAD computation from 
stats") - try: - # result is "FAD: (d+).(d+)" hence we remove the prefix with (d+) being one digit or more - fad_score = float(result.stdout[4:]) - return fad_score - except Exception as e: - raise RuntimeError(f"Error parsing FAD score from command stdout: {e}") - - def _log_process_result(self, returncode: int, log_file: tp.Union[Path, str], is_background: bool) -> None: - beams_name = self._get_samples_name(is_background) - if returncode: - with open(log_file, "r") as f: - error_log = f.read() - logger.error(error_log) - os._exit(1) - else: - logger.info(f"Successfully computed embedding beams on {beams_name} samples.") - - def _parallel_create_embedding_beams(self, num_of_gpus: int): - assert num_of_gpus > 0 - logger.info("Creating embeddings beams in a parallel manner on different GPUs") - tests_beams_process, tests_beams_log_file = self._create_embedding_beams(is_background=False, gpu_index=0) - bg_beams_process, bg_beams_log_file = self._create_embedding_beams(is_background=True, gpu_index=1) - tests_beams_code = tests_beams_process.wait() - bg_beams_code = bg_beams_process.wait() - self._log_process_result(tests_beams_code, tests_beams_log_file, is_background=False) - self._log_process_result(bg_beams_code, bg_beams_log_file, is_background=True) - - def _sequential_create_embedding_beams(self): - logger.info("Creating embeddings beams in a sequential manner") - tests_beams_process, tests_beams_log_file = self._create_embedding_beams(is_background=False) - tests_beams_code = tests_beams_process.wait() - self._log_process_result(tests_beams_code, tests_beams_log_file, is_background=False) - bg_beams_process, bg_beams_log_file = self._create_embedding_beams(is_background=True) - bg_beams_code = bg_beams_process.wait() - self._log_process_result(bg_beams_code, bg_beams_log_file, is_background=True) - - @flashy.distrib.rank_zero_only - def _local_compute_frechet_audio_distance(self): - """Compute Frechet Audio Distance score calling TensorFlow API.""" - num_of_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - if num_of_gpus > 1: - self._parallel_create_embedding_beams(num_of_gpus) - else: - self._sequential_create_embedding_beams() - fad_score = self._compute_fad_score(gpu_index=0) - return fad_score - - def compute(self) -> float: - """Compute metrics.""" - assert self.total_files.item() > 0, "No files dumped for FAD computation!" 
# type: ignore - fad_score = self._local_compute_frechet_audio_distance() - logger.warning(f"FAD score = {fad_score}") - fad_score = flashy.distrib.broadcast_object(fad_score, src=0) - return fad_score diff --git a/spaces/GuyYariv/AudioToken/app.py b/spaces/GuyYariv/AudioToken/app.py deleted file mode 100644 index 1a85dcd47f31dc88bea964547491c948588d7f47..0000000000000000000000000000000000000000 --- a/spaces/GuyYariv/AudioToken/app.py +++ /dev/null @@ -1,162 +0,0 @@ -import torch -from diffusers.loaders import AttnProcsLayers -from transformers import CLIPTextModel, CLIPTokenizer -from modules.beats.BEATs import BEATs, BEATsConfig -from modules.AudioToken.embedder import FGAEmbedder -from diffusers import AutoencoderKL, UNet2DConditionModel -from diffusers.models.attention_processor import LoRAAttnProcessor -from diffusers import StableDiffusionPipeline -import numpy as np -import gradio as gr -from scipy import signal - - -class AudioTokenWrapper(torch.nn.Module): - """Simple wrapper module for Stable Diffusion that holds all the models together""" - - def __init__( - self, - lora, - device, - ): - - super().__init__() - # Load scheduler and models - self.tokenizer = CLIPTokenizer.from_pretrained( - "CompVis/stable-diffusion-v1-4", subfolder="tokenizer" - ) - self.text_encoder = CLIPTextModel.from_pretrained( - "CompVis/stable-diffusion-v1-4", subfolder="text_encoder", revision=None - ) - self.unet = UNet2DConditionModel.from_pretrained( - "CompVis/stable-diffusion-v1-4", subfolder="unet", revision=None - ) - self.vae = AutoencoderKL.from_pretrained( - "CompVis/stable-diffusion-v1-4", subfolder="vae", revision=None - ) - - checkpoint = torch.load( - 'models/BEATs_iter3_plus_AS2M_finetuned_on_AS2M_cpt2.pt') - cfg = BEATsConfig(checkpoint['cfg']) - self.aud_encoder = BEATs(cfg) - self.aud_encoder.load_state_dict(checkpoint['model']) - self.aud_encoder.predictor = None - input_size = 768 * 3 - self.embedder = FGAEmbedder(input_size=input_size, output_size=768) - - self.vae.eval() - self.unet.eval() - self.text_encoder.eval() - self.aud_encoder.eval() - - if lora: - # Set correct lora layers - lora_attn_procs = {} - for name in self.unet.attn_processors.keys(): - cross_attention_dim = None if name.endswith( - "attn1.processor") else self.unet.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = self.unet.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = self.unet.config.block_out_channels[block_id] - - lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, - cross_attention_dim=cross_attention_dim) - - self.unet.set_attn_processor(lora_attn_procs) - self.lora_layers = AttnProcsLayers(self.unet.attn_processors) - self.lora_layers.eval() - lora_layers_learned_embeds = 'models/lora_layers_learned_embeds.bin' - self.lora_layers.load_state_dict(torch.load(lora_layers_learned_embeds, map_location=device)) - self.unet.load_attn_procs(lora_layers_learned_embeds) - - self.embedder.eval() - embedder_learned_embeds = 'models/embedder_learned_embeds.bin' - self.embedder.load_state_dict(torch.load(embedder_learned_embeds, map_location=device)) - - self.placeholder_token = '<*>' - num_added_tokens = self.tokenizer.add_tokens(self.placeholder_token) - if num_added_tokens == 0: - raise ValueError( - f"The tokenizer already contains the 
token {self.placeholder_token}. Please pass a different" - " `placeholder_token` that is not already in the tokenizer." - ) - self.placeholder_token_id = self.tokenizer.convert_tokens_to_ids(self.placeholder_token) - # Resize the token embeddings as we are adding new special tokens to the tokenizer - self.text_encoder.resize_token_embeddings(len(self.tokenizer)) - - -def greet(audio): - sample_rate, audio = audio - audio = audio.astype(np.float32, order='C') / 32768.0 - desired_sample_rate = 16000 - - if audio.ndim == 2: - audio = audio.sum(axis=1) / 2 - - if sample_rate != desired_sample_rate: - # Calculate the resampling ratio - resample_ratio = desired_sample_rate / sample_rate - - # Determine the new length of the audio data after downsampling - new_length = int(len(audio) * resample_ratio) - - # Downsample the audio data using resample - audio = signal.resample(audio, new_length) - - weight_dtype = torch.float32 - prompt = 'a photo of <*>' - - audio_values = torch.unsqueeze(torch.tensor(audio), dim=0).to(device).to(dtype=weight_dtype) - if audio_values.ndim == 1: - audio_values = torch.unsqueeze(audio_values, dim=0) - aud_features = model.aud_encoder.extract_features(audio_values)[1] - audio_token = model.embedder(aud_features) - - token_embeds = model.text_encoder.get_input_embeddings().weight.data - token_embeds[model.placeholder_token_id] = audio_token.clone() - - pipeline = StableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - tokenizer=model.tokenizer, - text_encoder=model.text_encoder, - vae=model.vae, - unet=model.unet, - ).to(device) - image = pipeline(prompt, num_inference_steps=40, guidance_scale=7.5).images[0] - return image - - -if __name__ == "__main__": - - lora = False - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - model = AudioTokenWrapper(lora, device) - model = model.to(device) - description = """

- This is a demo of AudioToken: Adaptation of Text-Conditioned Diffusion Models for Audio-to-Image Generation.

- A novel method utilizing latent diffusion models trained for text-to-image generation to generate images conditioned on audio recordings. Using a pre-trained audio encoding model, the proposed method encodes audio into a new token, which can be considered an adaptation layer between the audio and text representations.

- For more information, please see the original paper and repo. -

""" - - examples = [ - # ["assets/train.wav"], - ["assets/dog barking.wav"], - # ["assets/airplane taking off.wav"], - # ["assets/electric guitar.wav"], - # ["assets/female sings.wav"], - ] - - demo = gr.Interface( - fn=greet, - inputs="audio", - outputs="image", - title='AudioToken', - description=description, - examples=examples - ) - demo.launch() diff --git a/spaces/HaloMaster/chinesesummary/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py b/spaces/HaloMaster/chinesesummary/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py deleted file mode 100644 index 9b454c8cc236a114074c8a099878f8e464f87ad5..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py +++ /dev/null @@ -1,82 +0,0 @@ -# coding=utf-8 -# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tokenization classes for TransfoXLDenoise.""" - -import sentencepiece as spm -from transformers.tokenization_utils import PreTrainedTokenizer - -VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "transformer-xl-1b-base": - "https://huggingface.co/IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B/resolve/main/spiece.model", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "transformer-xl-1b-base": 512, -} - - -class TransfoXLDenoiseTokenizer(PreTrainedTokenizer): - """ - Construct a TransfoXLDenoise tokenizer. Based on pretrained sentence piece - - Args: - vocab_file (`str`): - Path to the vocabulary file. - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - SPIECE_UNDERLINE = "▁" - - def __init__( - self, - vocab_file, - unk_token="<|endoftext|>", - bos_token="<|endoftext|>", - eos_token="<|endoftext|>", - **kwargs - ): - super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) - "Initialisation" - self.sp_model = spm.SentencePieceProcessor() - self.sp_model.Load(vocab_file) - - @property - def vocab_size(self): - "Returns vocab size" - return len(self.sp_model) - - def _tokenize(self, text): - """ Returns a tokenized string. """ - return self.sp_model.EncodeAsPieces(text) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.sp_model.PieceToId(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.sp_model.IdToPiece(index) - - def convert_tokens_to_string(self, tokens): - """ Converts a sequence of tokens (string) in a single string. 
""" - out_string = "".join(tokens).replace(self.SPIECE_UNDERLINE, " ").strip() - return out_string diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/noisychannel/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/noisychannel/__init__.py deleted file mode 100644 index 89f1aef4f6328d25425e0bcabb42dfffd2ed35f0..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/noisychannel/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .rerank_options import * # noqa diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py deleted file mode 100644 index 36c85d1e2f60487494a92207feb4685e78db8aa2..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import sys - - -def main(): - for line in sys.stdin: - print(line.replace(" ", "").replace("|", " ").strip()) - - -if __name__ == "__main__": - main() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/utils/transforms.py b/spaces/HarryLee/eCommerceImageCaptioning/utils/transforms.py deleted file mode 100644 index 0a9edf6c3da3052758cb36bcfe1f50ba69cc6f32..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/utils/transforms.py +++ /dev/null @@ -1,508 +0,0 @@ -import random - -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F -import numpy as np -from PIL import Image - - -def crop(image, target, region, delete=True): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "polygons" in target: - polygons = target["polygons"] - num_polygons = polygons.shape[0] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - start_coord = torch.cat([torch.tensor([j, i], dtype=torch.float32) - for _ in range(polygons.shape[1] // 2)], dim=0) - cropped_boxes = polygons - start_coord - cropped_boxes = torch.min(cropped_boxes.reshape(num_polygons, -1, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - target["polygons"] = cropped_boxes.reshape(num_polygons, -1) - fields.append("polygons") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? 
- target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if delete and ("boxes" in target or "masks" in target): - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep.tolist()] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - target["boxes"] = boxes - - if "polygons" in target: - polygons = target["polygons"] - num_polygons = polygons.shape[0] - polygons = polygons.reshape(num_polygons, -1, 2) * torch.as_tensor([-1, 1]) + torch.as_tensor([w, 0]) - target["polygons"] = polygons - - if "masks" in target: - target['masks'] = target['masks'].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - - if (w <= h and w == size) or (h <= w and h == size): - if max_size is not None: - max_size = int(max_size) - h = min(h, max_size) - w = min(w, max_size) - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - if max_size is not None: - max_size = int(max_size) - oh = min(oh, max_size) - ow = min(ow, max_size) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size, interpolation=Image.BICUBIC) - - if target is None: - return rescaled_image - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "polygons" in target: - polygons = target["polygons"] - scaled_ratio = torch.cat([torch.tensor([ratio_width, ratio_height]) - for _ in range(polygons.shape[1] // 2)], dim=0) - scaled_polygons = polygons * scaled_ratio - target["polygons"] = scaled_polygons - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - assert False - # target['masks'] = interpolate( - # target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - - return rescaled_image, target - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - 
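(A minimal usage sketch of the target-aware helpers above, assuming a toy PIL image and a single-box target; `crop` and `resize` refer to the functions defined earlier in this file, and all values are made up for illustration.)
# Illustrative sketch only -- toy values, not part of the deleted file.
img = Image.new("RGB", (640, 480))                        # PIL image, (w, h)
target = {
    "boxes": torch.tensor([[100., 120., 300., 360.]]),    # xyxy in pixels
    "labels": torch.tensor([1]),
    "area": torch.tensor([(300. - 100.) * (360. - 120.)]),
    "size": torch.tensor([480, 640]),                      # (h, w)
}
# crop(region=(top, left, height, width)) shifts and clips the boxes, recomputes
# their area, and drops entries whose cropped box becomes empty (delete=True).
img_c, target_c = crop(img, target, (50, 60, 256, 256))
# resize(size, max_size) rescales the image, multiplies boxes/area by the
# width/height ratios, and updates target["size"] to the new (h, w).
img_r, target_r = resize(img_c, target_c, 384, max_size=512)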
-class ObjectCenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - - x0 = float(target['boxes'][0][0]) - y0 = float(target['boxes'][0][1]) - x1 = float(target['boxes'][0][2]) - y1 = float(target['boxes'][0][3]) - - center_x = (x0 + x1) / 2 - center_y = (y0 + y1) / 2 - crop_left = max(center_x-crop_width/2 + min(image_width-center_x-crop_width/2, 0), 0) - crop_top = max(center_y-crop_height/2 + min(image_height-center_y-crop_height/2, 0), 0) - - return crop(img, target, (crop_top, crop_left, crop_height, crop_width), delete=False) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None, equal=False): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - self.equal = equal - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - if self.equal: - return resize(img, target, size, size) - else: - return resize(img, target, size, self.max_size) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class Normalize(object): - def __init__(self, mean, std, max_image_size=512): - self.mean = mean - self.std = std - self.max_image_size = max_image_size - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - # h, w = image.shape[-2:] - h, w = target["size"][0], target["size"][1] - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes / self.max_image_size - target["boxes"] = boxes - if "polygons" in target: - polygons = target["polygons"] - scale = torch.cat([torch.tensor([w, h], dtype=torch.float32) - for _ in range(polygons.shape[1] // 2)], dim=0) - polygons = polygons / scale - target["polygons"] = polygons - return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string - - -class LargeScaleJitter(object): - """ - implementation of large scale jitter from copy_paste - """ - - def __init__(self, output_size=512, aug_scale_min=0.3, aug_scale_max=2.0): - self.desired_size = torch.tensor([output_size]) - self.aug_scale_min = aug_scale_min - self.aug_scale_max = aug_scale_max - - def rescale_target(self, scaled_size, image_size, target): - # compute rescaled targets - image_scale = scaled_size / image_size - ratio_height, ratio_width = image_scale - - target = target.copy() - target["size"] = scaled_size - - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - if "masks" in target: - assert False - masks = target['masks'] - # masks = interpolate( - # masks[:, None].float(), scaled_size, 
mode="nearest")[:, 0] > 0.5 - target['masks'] = masks - return target - - def crop_target(self, region, target): - i, j, h, w = region - fields = ["labels", "area"] - - target = target.copy() - target["size"] = torch.tensor([h, w]) - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep.tolist()] - return target - - def pad_target(self, padding, target): - target = target.copy() - if "masks" in target: - target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[1], 0, padding[0])) - return target - - def __call__(self, image, target=None): - image_size = image.size - image_size = torch.tensor(image_size[::-1]) - - random_scale = torch.rand(1) * (self.aug_scale_max - self.aug_scale_min) + self.aug_scale_min - scaled_size = (random_scale * self.desired_size).round() - - scale = torch.maximum(scaled_size / image_size[0], scaled_size / image_size[1]) - scaled_size = (image_size * scale).round().int() - - scaled_image = F.resize(image, scaled_size.tolist(), interpolation=Image.BICUBIC) - - if target is not None: - target = self.rescale_target(scaled_size, image_size, target) - - # randomly crop or pad images - if random_scale >= 1: - # Selects non-zero random offset (x, y) if scaled image is larger than desired_size. 
- max_offset = scaled_size - self.desired_size - offset = (max_offset * torch.rand(2)).floor().int() - region = (offset[0].item(), offset[1].item(), - self.desired_size[0].item(), self.desired_size[0].item()) - output_image = F.crop(scaled_image, *region) - if target is not None: - target = self.crop_target(region, target) - else: - assert False - padding = self.desired_size - scaled_size - output_image = F.pad(scaled_image, [0, 0, padding[1].item(), padding[0].item()]) - if target is not None: - target = self.pad_target(padding, target) - - return output_image, target - - -class OriginLargeScaleJitter(object): - """ - implementation of large scale jitter from copy_paste - """ - - def __init__(self, output_size=512, aug_scale_min=0.3, aug_scale_max=2.0): - self.desired_size = torch.tensor(output_size) - self.aug_scale_min = aug_scale_min - self.aug_scale_max = aug_scale_max - - def rescale_target(self, scaled_size, image_size, target): - # compute rescaled targets - image_scale = scaled_size / image_size - ratio_height, ratio_width = image_scale - - target = target.copy() - target["size"] = scaled_size - - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - if "masks" in target: - assert False - masks = target['masks'] - # masks = interpolate( - # masks[:, None].float(), scaled_size, mode="nearest")[:, 0] > 0.5 - target['masks'] = masks - return target - - def crop_target(self, region, target): - i, j, h, w = region - fields = ["labels", "area"] - - target = target.copy() - target["size"] = torch.tensor([h, w]) - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? 
- target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep.tolist()] - return target - - def pad_target(self, padding, target): - target = target.copy() - if "masks" in target: - target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[1], 0, padding[0])) - return target - - def __call__(self, image, target=None): - image_size = image.size - image_size = torch.tensor(image_size[::-1]) - - out_desired_size = (self.desired_size * image_size / max(image_size)).round().int() - - random_scale = torch.rand(1) * (self.aug_scale_max - self.aug_scale_min) + self.aug_scale_min - scaled_size = (random_scale * self.desired_size).round() - - scale = torch.minimum(scaled_size / image_size[0], scaled_size / image_size[1]) - scaled_size = (image_size * scale).round().int() - - scaled_image = F.resize(image, scaled_size.tolist()) - - if target is not None: - target = self.rescale_target(scaled_size, image_size, target) - - # randomly crop or pad images - if random_scale > 1: - # Selects non-zero random offset (x, y) if scaled image is larger than desired_size. - max_offset = scaled_size - out_desired_size - offset = (max_offset * torch.rand(2)).floor().int() - region = (offset[0].item(), offset[1].item(), - out_desired_size[0].item(), out_desired_size[1].item()) - output_image = F.crop(scaled_image, *region) - if target is not None: - target = self.crop_target(region, target) - else: - padding = out_desired_size - scaled_size - output_image = F.pad(scaled_image, [0, 0, padding[1].item(), padding[0].item()]) - if target is not None: - target = self.pad_target(padding, target) - - return output_image, target - - -class RandomDistortion(object): - """ - Distort image w.r.t hue, saturation and exposure. 
- """ - - def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, prob=0.5): - self.prob = prob - self.tfm = T.ColorJitter(brightness, contrast, saturation, hue) - - def __call__(self, img, target=None): - if np.random.random() < self.prob: - return self.tfm(img), target - else: - return img, target diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/meldataset.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/meldataset.py deleted file mode 100644 index 8c6ca9ec8a6cc6408a77492e795bffef7f86b611..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/meldataset.py +++ /dev/null @@ -1,233 +0,0 @@ -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -from librosa.util import normalize -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax) + "_" + str(y.device)] = ( - torch.from_numpy(mel).float().to(y.device) - ) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[str(y.device)], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) - - spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -def get_dataset_filelist(a): - with open(a.input_training_file, "r", encoding="utf-8") as fi: - training_files = [x for x in fi.read().split("\n") if len(x) > 0] - - with open(a.input_validation_file, "r", encoding="utf-8") as fi: - validation_files = [x for x in fi.read().split("\n") if len(x) > 0] - return training_files, validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__( - self, - training_files, - segment_size, - n_fft, - num_mels, - hop_size, - win_size, - sampling_rate, - fmin, - fmax, - split=True, - shuffle=True, - n_cache_reuse=1, - device=None, - fmax_loss=None, - fine_tuning=False, - base_mels_path=None, - ): - self.audio_files = training_files - random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - 
self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.cached_wav = None - self.n_cache_reuse = n_cache_reuse - self._cache_ref_count = 0 - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - def __getitem__(self, index): - filename = self.audio_files[index] - if self._cache_ref_count == 0: - audio, sampling_rate = load_wav(filename) - audio = audio / MAX_WAV_VALUE - if not self.fine_tuning: - audio = normalize(audio) * 0.95 - self.cached_wav = audio - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate - ) - ) - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) - - if not self.fine_tuning: - if self.split: - if audio.size(1) >= self.segment_size: - max_audio_start = audio.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio = audio[:, audio_start : audio_start + self.segment_size] - else: - audio = torch.nn.functional.pad( - audio, (0, self.segment_size - audio.size(1)), "constant" - ) - - mel = mel_spectrogram( - audio, - self.n_fft, - self.num_mels, - self.sampling_rate, - self.hop_size, - self.win_size, - self.fmin, - self.fmax, - center=False, - ) - else: - mel = np.load( - os.path.join( - self.base_mels_path, - os.path.splitext(os.path.split(filename)[-1])[0] + ".npy", - ) - ) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start : mel_start + frames_per_seg] - audio = audio[ - :, - mel_start - * self.hop_size : (mel_start + frames_per_seg) - * self.hop_size, - ] - else: - mel = torch.nn.functional.pad( - mel, (0, frames_per_seg - mel.size(2)), "constant" - ) - audio = torch.nn.functional.pad( - audio, (0, self.segment_size - audio.size(1)), "constant" - ) - - mel_loss = mel_spectrogram( - audio, - self.n_fft, - self.num_mels, - self.sampling_rate, - self.hop_size, - self.win_size, - self.fmin, - self.fmax_loss, - center=False, - ) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - def __len__(self): - return len(self.audio_files) diff --git a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/model/__init__.py b/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/model/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HighCWu/GPEN/retinaface/utils/timer.py b/spaces/HighCWu/GPEN/retinaface/utils/timer.py deleted file mode 100644 index e4b3b8098a5ad41f8d18d42b6b2fedb694aa5508..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GPEN/retinaface/utils/timer.py +++ /dev/null @@ -1,40 +0,0 @@ -# -------------------------------------------------------- -# Fast R-CNN -# Copyright (c) 2015 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Ross Girshick -# -------------------------------------------------------- - -import time - - 
-class Timer(object): - """A simple timer.""" - def __init__(self): - self.total_time = 0. - self.calls = 0 - self.start_time = 0. - self.diff = 0. - self.average_time = 0. - - def tic(self): - # using time.time instead of time.clock because time time.clock - # does not normalize for multithreading - self.start_time = time.time() - - def toc(self, average=True): - self.diff = time.time() - self.start_time - self.total_time += self.diff - self.calls += 1 - self.average_time = self.total_time / self.calls - if average: - return self.average_time - else: - return self.diff - - def clear(self): - self.total_time = 0. - self.calls = 0 - self.start_time = 0. - self.diff = 0. - self.average_time = 0. diff --git a/spaces/HighCWu/Style2Paints-4-Gradio/README.md b/spaces/HighCWu/Style2Paints-4-Gradio/README.md deleted file mode 100644 index d1c62d5447fd1f4f5cd9432fd13899780728d0b7..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/Style2Paints-4-Gradio/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Style2Paints 4 Gradio -emoji: 🐨 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: apache-2.0 -python_version: 3.8 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/app.py b/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/app.py deleted file mode 100644 index a6521ca0578ed8eea87773d2c34fe0a4f9d4af78..0000000000000000000000000000000000000000 --- a/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/app.py +++ /dev/null @@ -1,89 +0,0 @@ -from transformers import AutoFeatureExtractor, YolosForObjectDetection -import gradio as gr -from PIL import Image -import torch -import matplotlib.pyplot as plt -import io -import numpy as np - - -COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125], - [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]] - - -def process_class_list(classes_string: str): - return [x.strip() for x in classes_string.split(",")] if classes_string else [] - -def model_inference(img, model_name: str, prob_threshold: int, classes_to_show = str): - feature_extractor = AutoFeatureExtractor.from_pretrained(f"hustvl/{model_name}") - model = YolosForObjectDetection.from_pretrained(f"hustvl/{model_name}") - - img = Image.fromarray(img) - - pixel_values = feature_extractor(img, return_tensors="pt").pixel_values - - with torch.no_grad(): - outputs = model(pixel_values, output_attentions=True) - - probas = outputs.logits.softmax(-1)[0, :, :-1] - keep = probas.max(-1).values > prob_threshold - - target_sizes = torch.tensor(img.size[::-1]).unsqueeze(0) - postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes) - bboxes_scaled = postprocessed_outputs[0]['boxes'] - - classes_list = process_class_list(classes_to_show) - return plot_results( - img, probas[keep], bboxes_scaled[keep], model, classes_list - ) - -def plot_results(pil_img, prob, boxes, model, classes_list): - plt.figure(figsize=(16,10)) - plt.imshow(pil_img) - ax = plt.gca() - colors = COLORS * 100 - for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors): - cl = p.argmax() - object_class = model.config.id2label[cl.item()] - - if len(classes_list) > 0 : - if object_class not in classes_list: - continue - - ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, - fill=False, color=c, linewidth=3)) - text = f'{object_class}: {p[cl]:0.2f}' - ax.text(xmin, ymin, 
text, fontsize=15, - bbox=dict(facecolor='yellow', alpha=0.5)) - plt.axis('off') - return fig2img(plt.gcf()) - -def fig2img(fig): - buf = io.BytesIO() - fig.savefig(buf) - buf.seek(0) - return Image.open(buf) - -description = """ -Do you want to see what objects are in your images? Try our object detection app, powered by YOLOS, a state-of-the-art algorithm that can find and name multiple objects in a single image. -You can upload or drag and drop an image file to detect objects using YOLOS models. -You can also choose from different YOLOS models, adjust the probability threshold, and select the classes to use for detection. -Our app will show you the results in an interactive image with bounding boxes and labels for each detected object. -You can also download the results as an image file. Our app is fast, accurate, and easy to use. -Try it now and discover the power of object detection! 😊 -""" - -image_in = gr.components.Image() -image_out = gr.components.Image() -model_choice = gr.components.Dropdown(["yolos-tiny", "yolos-small", "yolos-base", "yolos-small-300", "yolos-small-dwr"], value="yolos-small", label="YOLOS Model") -prob_threshold_slider = gr.components.Slider(minimum=0, maximum=1.0, step=0.01, value=0.9, label="Probability Threshold") -classes_to_show = gr.components.Textbox(placeholder="e.g. person, car , laptop", label="Classes to use (Optional)") - -Iface = gr.Interface( - fn=model_inference, - inputs=[image_in,model_choice, prob_threshold_slider, classes_to_show], - outputs=image_out, - title="Object Detection With YOLO", - description=description, - theme='HaleyCH/HaleyCH_Theme', -).launch() \ No newline at end of file diff --git a/spaces/IAMTFRMZA/DreamlikeArt-Diffusion-1.0/app.py b/spaces/IAMTFRMZA/DreamlikeArt-Diffusion-1.0/app.py deleted file mode 100644 index 650f6f13d5a4ea72430d7085707fc9aed8ba2013..0000000000000000000000000000000000000000 --- a/spaces/IAMTFRMZA/DreamlikeArt-Diffusion-1.0/app.py +++ /dev/null @@ -1,143 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import random -import string -import time -from queue import Queue -from threading import Thread -import emoji - - -text_gen=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion") -def get_prompts(prompt_text): - if prompt_text: - return text_gen("dreamlikeart, " + prompt_text) - else: - return text_gen("") -proc1=gr.Interface.load("models/dreamlike-art/dreamlike-diffusion-1.0") - -def restart_script_periodically(): - while True: - random_time = random.randint(540, 600) - time.sleep(random_time) - os.execl(sys.executable, sys.executable, *sys.argv) - - -restart_thread = Thread(target=restart_script_periodically, daemon=True) -restart_thread.start() - - -queue = Queue() -queue_threshold = 100 - -def add_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - percentage_noise = noise_level * 5 - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - prompt_list = list(prompt) - noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits) - noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', 
'🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈']) - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - - - -def send_it1(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output1 = proc1(prompt_with_noise) - return output1 - -def send_it2(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output2 = proc1(prompt_with_noise) - return output2 - -def send_it3(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output3 = proc1(prompt_with_noise) - return output3 - -def send_it4(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output4 = proc1(prompt_with_noise) - return output4 - - -with gr.Blocks(css='style.css') as demo: - gr.HTML( - """ -
-            Image Creation Demo
-            Noise Level: Controls how much randomness is added to the input before it is sent to the model. Higher noise level produces more diverse outputs, while lower noise level produces similar outputs.
- """ - ) - with gr.Column(elem_id="col-container"): - with gr.Row(variant="compact"): - input_text = gr.Textbox( - label="Short Prompt", - show_label=False, - max_lines=2, - placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!", - ).style( - container=False, - ) - see_prompts = gr.Button("✨ Magic Prompt ✨").style(full_width=False) - - - with gr.Row(variant="compact"): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=2, - placeholder="Full Prompt", - ).style( - container=False, - ) - run = gr.Button("Generate Images").style(full_width=False) - - with gr.Row(): - with gr.Row(): - noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level") - with gr.Row(): - with gr.Row(): - output1=gr.Image(label="Dreamlike Diffusion 1.0",show_label=False) - output2=gr.Image(label="Dreamlike Diffusion 1.0",show_label=False) - - - see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False) - run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1]) - run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2]) - - - with gr.Row(): - gr.HTML( - """ - """ -) - - demo.launch(enable_queue=True, inline=True) - block.queue(concurrency_count=100) \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/datasets/asr_prep_json.py b/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/datasets/asr_prep_json.py deleted file mode 100644 index b8db8ff16691158fae034a8ab3faad622b351caf..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/datasets/asr_prep_json.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from __future__ import absolute_import, division, print_function, unicode_literals - -import argparse -import concurrent.futures -import json -import multiprocessing -import os -from collections import namedtuple -from itertools import chain - -import sentencepiece as spm -from fairseq.data import Dictionary - - -MILLISECONDS_TO_SECONDS = 0.001 - - -def process_sample(aud_path, lable, utt_id, sp, tgt_dict): - import torchaudio - - input = {} - output = {} - si, ei = torchaudio.info(aud_path) - input["length_ms"] = int( - si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS - ) - input["path"] = aud_path - - token = " ".join(sp.EncodeAsPieces(lable)) - ids = tgt_dict.encode_line(token, append_eos=False) - output["text"] = lable - output["token"] = token - output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids])) - return {utt_id: {"input": input, "output": output}} - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--audio-dirs", - nargs="+", - default=["-"], - required=True, - help="input directories with audio files", - ) - parser.add_argument( - "--labels", - required=True, - help="aggregated input labels with format per line", - type=argparse.FileType("r", encoding="UTF-8"), - ) - parser.add_argument( - "--spm-model", - required=True, - help="sentencepiece model to use for encoding", - type=argparse.FileType("r", encoding="UTF-8"), - ) - parser.add_argument( - "--dictionary", - required=True, - help="file to load fairseq dictionary from", - type=argparse.FileType("r", encoding="UTF-8"), - ) - parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav") - parser.add_argument( - "--output", - required=True, - type=argparse.FileType("w"), - help="path to save json output", - ) - args = parser.parse_args() - - sp = spm.SentencePieceProcessor() - sp.Load(args.spm_model.name) - - tgt_dict = Dictionary.load(args.dictionary) - - labels = {} - for line in args.labels: - (utt_id, label) = line.split(" ", 1) - labels[utt_id] = label - if len(labels) == 0: - raise Exception("No labels found in ", args.labels_path) - - Sample = namedtuple("Sample", "aud_path utt_id") - samples = [] - for path, _, files in chain.from_iterable( - os.walk(path) for path in args.audio_dirs - ): - for f in files: - if f.endswith(args.audio_format): - if len(os.path.splitext(f)) != 2: - raise Exception("Expect file name. Got: ", f) - utt_id = os.path.splitext(f)[0] - if utt_id not in labels: - continue - samples.append(Sample(os.path.join(path, f), utt_id)) - - utts = {} - num_cpu = multiprocessing.cpu_count() - with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor: - future_to_sample = { - executor.submit( - process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict - ): s - for s in samples - } - for future in concurrent.futures.as_completed(future_to_sample): - try: - data = future.result() - except Exception as exc: - print("generated an exception: ", exc) - else: - utts.update(data) - json.dump({"utts": utts}, args.output, indent=4) - - -if __name__ == "__main__": - main() diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/README.md b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/README.md deleted file mode 100644 index 4a3ae54b857c43621c9fb67ee4b214584beec835..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Speech Synthesis (S^2) -=== - -Speech synthesis with fairseq. 
- -- Autoregressive and non-autoregressive models -- Multi-speaker synthesis -- Audio preprocessing -- Automatic metrics -- Similar data configuration as [S2T](../speech_to_text/README.md) - - -## Examples -- [Single-speaker synthesis on LJSpeech](docs/ljspeech_example.md) -- [Multi-speaker synthesis on VCTK](docs/vctk_example.md) -- [Multi-speaker synthesis on Common Voice](docs/common_voice_example.md) diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py deleted file mode 100644 index f83471409a434556cab70086ca9e2d72d4bdddd5..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import sys - - -def main(): - for line in sys.stdin: - print(" ".join(list(line.strip().replace(" ", "|"))) + " |") - - -if __name__ == "__main__": - main() diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/berard.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/berard.py deleted file mode 100644 index c505e3acaa84e5f3263ccbfaf9556f77123f09fc..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/berard.py +++ /dev/null @@ -1,606 +0,0 @@ -#!/usr/bin/env python3 - -from ast import literal_eval -from typing import List, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import checkpoint_utils, utils -from fairseq.data.data_utils import lengths_to_padding_mask -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, - register_model, - register_model_architecture, -) - - -@register_model("s2t_berard") -class BerardModel(FairseqEncoderDecoderModel): - """Implementation of a model similar to https://arxiv.org/abs/1802.04200 - - Paper title: End-to-End Automatic Speech Translation of Audiobooks - An implementation is available in tensorflow at - https://github.com/eske/seq2seq - Relevant files in this implementation are the config - (https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml) - and the model code - (https://github.com/eske/seq2seq/blob/master/translate/models.py). - The encoder and decoder try to be close to the original implementation. - The attention is an MLP as in Bahdanau et al. - (https://arxiv.org/abs/1409.0473). - There is no state initialization by averaging the encoder outputs. - """ - - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - parser.add_argument( - "--input-layers", - type=str, - metavar="EXPR", - help="List of linear layer dimensions. These " - "layers are applied to the input features and " - "are followed by tanh and possibly dropout.", - ) - parser.add_argument( - "--dropout", - type=float, - metavar="D", - help="Dropout probability to use in the encoder/decoder. " - "Note that this parameters control dropout in various places, " - "there is no fine-grained control for dropout for embeddings " - "vs LSTM layers for example.", - ) - parser.add_argument( - "--in-channels", - type=int, - metavar="N", - help="Number of encoder input channels. 
" "Typically value is 1.", - ) - parser.add_argument( - "--conv-layers", - type=str, - metavar="EXPR", - help="List of conv layers " "(format: (channels, kernel, stride)).", - ) - parser.add_argument( - "--num-blstm-layers", - type=int, - metavar="N", - help="Number of encoder bi-LSTM layers.", - ) - parser.add_argument( - "--lstm-size", type=int, metavar="N", help="LSTM hidden size." - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="Embedding dimension of the decoder target tokens.", - ) - parser.add_argument( - "--decoder-hidden-dim", - type=int, - metavar="N", - help="Decoder LSTM hidden dimension.", - ) - parser.add_argument( - "--decoder-num-layers", - type=int, - metavar="N", - help="Number of decoder LSTM layers.", - ) - parser.add_argument( - "--attention-dim", - type=int, - metavar="N", - help="Hidden layer dimension in MLP attention.", - ) - parser.add_argument( - "--output-layer-dim", - type=int, - metavar="N", - help="Hidden layer dim for linear layer prior to output projection.", - ) - parser.add_argument( - "--load-pretrained-encoder-from", - type=str, - metavar="STR", - help="model to take encoder weights from (for initialization)", - ) - parser.add_argument( - "--load-pretrained-decoder-from", - type=str, - metavar="STR", - help="model to take decoder weights from (for initialization)", - ) - - @classmethod - def build_encoder(cls, args, task): - encoder = BerardEncoder( - input_layers=literal_eval(args.input_layers), - conv_layers=literal_eval(args.conv_layers), - in_channels=args.input_channels, - input_feat_per_channel=args.input_feat_per_channel, - num_blstm_layers=args.num_blstm_layers, - lstm_size=args.lstm_size, - dropout=args.dropout, - ) - if getattr(args, "load_pretrained_encoder_from", None): - encoder = checkpoint_utils.load_pretrained_component_from_model( - component=encoder, checkpoint=args.load_pretrained_encoder_from - ) - return encoder - - @classmethod - def build_decoder(cls, args, task): - decoder = LSTMDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - num_layers=args.decoder_num_layers, - hidden_size=args.decoder_hidden_dim, - dropout=args.dropout, - encoder_output_dim=2 * args.lstm_size, # bidirectional - attention_dim=args.attention_dim, - output_layer_dim=args.output_layer_dim, - ) - if getattr(args, "load_pretrained_decoder_from", None): - decoder = checkpoint_utils.load_pretrained_component_from_model( - component=decoder, checkpoint=args.load_pretrained_decoder_from - ) - return decoder - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - encoder = cls.build_encoder(args, task) - decoder = cls.build_decoder(args, task) - - return cls(encoder, decoder) - - def get_normalized_probs(self, net_output, log_probs, sample=None): - # net_output['encoder_out'] is a (B, T, D) tensor - lprobs = super().get_normalized_probs(net_output, log_probs, sample) - # lprobs is a (B, T, D) tensor - lprobs.batch_first = True - return lprobs - - -class BerardEncoder(FairseqEncoder): - def __init__( - self, - input_layers: List[int], - conv_layers: List[Tuple[int]], - in_channels: int, - input_feat_per_channel: int, - num_blstm_layers: int, - lstm_size: int, - dropout: float, - ): - """ - Args: - input_layers: list of linear layer dimensions. These layers are - applied to the input features and are followed by tanh and - possibly dropout. - conv_layers: list of conv2d layer configurations. A configuration is - a tuple (out_channels, conv_kernel_size, stride). 
- in_channels: number of input channels. - input_feat_per_channel: number of input features per channel. These - are speech features, typically 40 or 80. - num_blstm_layers: number of bidirectional LSTM layers. - lstm_size: size of the LSTM hidden (and cell) size. - dropout: dropout probability. Dropout can be applied after the - linear layers and LSTM layers but not to the convolutional - layers. - """ - super().__init__(None) - - self.input_layers = nn.ModuleList() - in_features = input_feat_per_channel - for out_features in input_layers: - if dropout > 0: - self.input_layers.append( - nn.Sequential( - nn.Linear(in_features, out_features), nn.Dropout(p=dropout) - ) - ) - else: - self.input_layers.append(nn.Linear(in_features, out_features)) - in_features = out_features - - self.in_channels = in_channels - self.input_dim = input_feat_per_channel - self.conv_kernel_sizes_and_strides = [] - self.conv_layers = nn.ModuleList() - lstm_input_dim = input_layers[-1] - for conv_layer in conv_layers: - out_channels, conv_kernel_size, conv_stride = conv_layer - self.conv_layers.append( - nn.Conv2d( - in_channels, - out_channels, - conv_kernel_size, - stride=conv_stride, - padding=conv_kernel_size // 2, - ) - ) - self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride)) - in_channels = out_channels - lstm_input_dim //= conv_stride - - lstm_input_dim *= conv_layers[-1][0] - self.lstm_size = lstm_size - self.num_blstm_layers = num_blstm_layers - self.lstm = nn.LSTM( - input_size=lstm_input_dim, - hidden_size=lstm_size, - num_layers=num_blstm_layers, - dropout=dropout, - bidirectional=True, - ) - self.output_dim = 2 * lstm_size # bidirectional - if dropout > 0: - self.dropout = nn.Dropout(p=dropout) - else: - self.dropout = None - - def forward(self, src_tokens, src_lengths=None, **kwargs): - """ - Args - src_tokens: padded tensor (B, T, C * feat) - src_lengths: tensor of original lengths of input utterances (B,) - """ - bsz, max_seq_len, _ = src_tokens.size() - # (B, C, T, feat) - x = ( - src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) - .transpose(1, 2) - .contiguous() - ) - - for input_layer in self.input_layers: - x = input_layer(x) - x = torch.tanh(x) - - for conv_layer in self.conv_layers: - x = conv_layer(x) - - bsz, _, output_seq_len, _ = x.size() - - # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> - # (T, B, C * feat) - x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) - - input_lengths = src_lengths.clone() - for k, s in self.conv_kernel_sizes_and_strides: - p = k // 2 - input_lengths = (input_lengths.float() + 2 * p - k) / s + 1 - input_lengths = input_lengths.floor().long() - - packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths) - - h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() - c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() - packed_outs, _ = self.lstm(packed_x, (h0, c0)) - - # unpack outputs and apply dropout - x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs) - if self.dropout is not None: - x = self.dropout(x) - - encoder_padding_mask = ( - lengths_to_padding_mask(output_lengths).to(src_tokens.device).t() - ) - - return { - "encoder_out": x, # (T, B, C) - "encoder_padding_mask": encoder_padding_mask, # (T, B) - } - - def reorder_encoder_out(self, encoder_out, new_order): - encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( - 1, new_order - ) - encoder_out["encoder_padding_mask"] = encoder_out[ - "encoder_padding_mask" - 
].index_select(1, new_order) - return encoder_out - - -class MLPAttention(nn.Module): - """The original attention from Badhanau et al. (2014) - - https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron. - The attention score between position i in the encoder and position j in the - decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a) - """ - - def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim): - super().__init__() - - self.context_dim = context_dim - self.attention_dim = attention_dim - # W_ae and b_a - self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True) - # W_ad - self.decoder_proj = nn.Linear( - decoder_hidden_state_dim, self.attention_dim, bias=False - ) - # V_a - self.to_scores = nn.Linear(self.attention_dim, 1, bias=False) - - def forward(self, decoder_state, source_hids, encoder_padding_mask): - """The expected input dimensions are: - decoder_state: bsz x decoder_hidden_state_dim - source_hids: src_len x bsz x context_dim - encoder_padding_mask: src_len x bsz - """ - src_len, bsz, _ = source_hids.size() - # (src_len*bsz) x context_dim (to feed through linear) - flat_source_hids = source_hids.view(-1, self.context_dim) - # (src_len*bsz) x attention_dim - encoder_component = self.encoder_proj(flat_source_hids) - # src_len x bsz x attention_dim - encoder_component = encoder_component.view(src_len, bsz, self.attention_dim) - # 1 x bsz x attention_dim - decoder_component = self.decoder_proj(decoder_state).unsqueeze(0) - # Sum with broadcasting and apply the non linearity - # src_len x bsz x attention_dim - hidden_att = torch.tanh( - (decoder_component + encoder_component).view(-1, self.attention_dim) - ) - # Project onto the reals to get attentions scores (src_len x bsz) - attn_scores = self.to_scores(hidden_att).view(src_len, bsz) - - # Mask + softmax (src_len x bsz) - if encoder_padding_mask is not None: - attn_scores = ( - attn_scores.float() - .masked_fill_(encoder_padding_mask, float("-inf")) - .type_as(attn_scores) - ) # FP16 support: cast to float and back - # srclen x bsz - normalized_masked_attn_scores = F.softmax(attn_scores, dim=0) - - # Sum weighted sources (bsz x context_dim) - attn_weighted_context = ( - source_hids * normalized_masked_attn_scores.unsqueeze(2) - ).sum(dim=0) - - return attn_weighted_context, normalized_masked_attn_scores - - -class LSTMDecoder(FairseqIncrementalDecoder): - def __init__( - self, - dictionary, - embed_dim, - num_layers, - hidden_size, - dropout, - encoder_output_dim, - attention_dim, - output_layer_dim, - ): - """ - Args: - dictionary: target text dictionary. - embed_dim: embedding dimension for target tokens. - num_layers: number of LSTM layers. - hidden_size: hidden size for LSTM layers. - dropout: dropout probability. Dropout can be applied to the - embeddings, the LSTM layers, and the context vector. - encoder_output_dim: encoder output dimension (hidden size of - encoder LSTM). - attention_dim: attention dimension for MLP attention. - output_layer_dim: size of the linear layer prior to output - projection. 
- """ - super().__init__(dictionary) - self.num_layers = num_layers - self.hidden_size = hidden_size - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx) - if dropout > 0: - self.dropout = nn.Dropout(p=dropout) - else: - self.dropout = None - - self.layers = nn.ModuleList() - for layer_id in range(num_layers): - input_size = embed_dim if layer_id == 0 else encoder_output_dim - self.layers.append( - nn.LSTMCell(input_size=input_size, hidden_size=hidden_size) - ) - - self.context_dim = encoder_output_dim - self.attention = MLPAttention( - decoder_hidden_state_dim=hidden_size, - context_dim=encoder_output_dim, - attention_dim=attention_dim, - ) - - self.deep_output_layer = nn.Linear( - hidden_size + encoder_output_dim + embed_dim, output_layer_dim - ) - self.output_projection = nn.Linear(output_layer_dim, num_embeddings) - - def forward( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs - ): - encoder_padding_mask = encoder_out["encoder_padding_mask"] - encoder_outs = encoder_out["encoder_out"] - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - bsz, seqlen = prev_output_tokens.size() - - srclen = encoder_outs.size(0) - - # embed tokens - embeddings = self.embed_tokens(prev_output_tokens) - x = embeddings - if self.dropout is not None: - x = self.dropout(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # initialize previous states (or get from cache during incremental - # generation) - cached_state = utils.get_incremental_state( - self, incremental_state, "cached_state" - ) - if cached_state is not None: - prev_hiddens, prev_cells = cached_state - else: - prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers - prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers - - attn_scores = x.new_zeros(bsz, srclen) - attention_outs = [] - outs = [] - for j in range(seqlen): - input = x[j, :, :] - attention_out = None - for i, layer in enumerate(self.layers): - # the previous state is one layer below except for the bottom - # layer where the previous state is the state emitted by the - # top layer - hidden, cell = layer( - input, - ( - prev_hiddens[(i - 1) % self.num_layers], - prev_cells[(i - 1) % self.num_layers], - ), - ) - if self.dropout is not None: - hidden = self.dropout(hidden) - prev_hiddens[i] = hidden - prev_cells[i] = cell - if attention_out is None: - attention_out, attn_scores = self.attention( - hidden, encoder_outs, encoder_padding_mask - ) - if self.dropout is not None: - attention_out = self.dropout(attention_out) - attention_outs.append(attention_out) - input = attention_out - - # collect the output of the top layer - outs.append(hidden) - - # cache previous states (no-op except during incremental generation) - utils.set_incremental_state( - self, incremental_state, "cached_state", (prev_hiddens, prev_cells) - ) - - # collect outputs across time steps - x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) - attention_outs_concat = torch.cat(attention_outs, dim=0).view( - seqlen, bsz, self.context_dim - ) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - attention_outs_concat = attention_outs_concat.transpose(0, 1) - - # concat LSTM output, attention output and embedding - # before output projection - x = torch.cat((x, attention_outs_concat, embeddings), dim=2) - x = self.deep_output_layer(x) - x = torch.tanh(x) - if self.dropout is not None: - x = self.dropout(x) - # 
project back to size of vocabulary - x = self.output_projection(x) - - # to return the full attn_scores tensor, we need to fix the decoder - # to account for subsampling input frames - # return x, attn_scores - return x, None - - def reorder_incremental_state(self, incremental_state, new_order): - super().reorder_incremental_state(incremental_state, new_order) - cached_state = utils.get_incremental_state( - self, incremental_state, "cached_state" - ) - if cached_state is None: - return - - def reorder_state(state): - if isinstance(state, list): - return [reorder_state(state_i) for state_i in state] - return state.index_select(0, new_order) - - new_state = tuple(map(reorder_state, cached_state)) - utils.set_incremental_state(self, incremental_state, "cached_state", new_state) - - -@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard") -def berard(args): - """The original version: "End-to-End Automatic Speech Translation of - Audiobooks" (https://arxiv.org/abs/1802.04200) - """ - args.input_layers = getattr(args, "input_layers", "[256, 128]") - args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]") - args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) - args.lstm_size = getattr(args, "lstm_size", 256) - args.dropout = getattr(args, "dropout", 0.2) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) - args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) - args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512) - args.attention_dim = getattr(args, "attention_dim", 512) - args.output_layer_dim = getattr(args, "output_layer_dim", 128) - args.load_pretrained_encoder_from = getattr( - args, "load_pretrained_encoder_from", None - ) - args.load_pretrained_decoder_from = getattr( - args, "load_pretrained_decoder_from", None - ) - - -@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3") -def berard_256_3_3(args): - """Used in - * "Harnessing Indirect Training Data for End-to-End Automatic Speech - Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515) - * "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus" - (https://arxiv.org/pdf/2002.01320.pdf) - * "Self-Supervised Representations Improve End-to-End Speech Translation" - (https://arxiv.org/abs/2006.12124) - """ - args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) - berard(args) - - -@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2") -def berard_512_3_2(args): - args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) - args.lstm_size = getattr(args, "lstm_size", 512) - args.dropout = getattr(args, "dropout", 0.3) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) - args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) - args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) - args.attention_dim = getattr(args, "attention_dim", 512) - args.output_layer_dim = getattr(args, "output_layer_dim", 256) - berard(args) - - -@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3") -def berard_512_5_3(args): - args.num_blstm_layers = getattr(args, "num_blstm_layers", 5) - args.lstm_size = getattr(args, "lstm_size", 512) - args.dropout = getattr(args, "dropout", 0.3) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) - args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) - args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) - args.attention_dim = 
getattr(args, "attention_dim", 512) - args.output_layer_dim = getattr(args, "output_layer_dim", 256) - berard(args) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py deleted file mode 100644 index f869c4b2f8fb15f96a292e39bd293df7898a4fce..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Callable, Optional - -import torch -import torch.nn as nn -from fairseq import utils -from fairseq.modules import LayerNorm, MultiheadAttention -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.quant_noise import quant_noise - - -class TransformerSentenceEncoderLayer(nn.Module): - """ - Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained - models. - """ - - def __init__( - self, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - activation_fn: str = "relu", - export: bool = False, - q_noise: float = 0.0, - qn_block_size: int = 8, - init_fn: Callable = None, - ) -> None: - super().__init__() - - if init_fn is not None: - init_fn() - - # Initialize parameters - self.embedding_dim = embedding_dim - self.num_attention_heads = num_attention_heads - self.attention_dropout = attention_dropout - self.q_noise = q_noise - self.qn_block_size = qn_block_size - - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - self.activation_dropout_module = FairseqDropout( - activation_dropout, module_name=self.__class__.__name__ - ) - - # Initialize blocks - self.activation_fn = utils.get_activation_fn(activation_fn) - self.self_attn = self.build_self_attention( - self.embedding_dim, - num_attention_heads, - dropout=attention_dropout, - self_attention=True, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - - # layer norm associated with the self attention layer - self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) - - self.fc1 = self.build_fc1( - self.embedding_dim, - ffn_embedding_dim, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - self.fc2 = self.build_fc2( - ffn_embedding_dim, - self.embedding_dim, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - - # layer norm associated with the position wise feed-forward NN - self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) - - def build_self_attention( - self, - embed_dim, - num_attention_heads, - dropout, - self_attention, - q_noise, - qn_block_size, - ): - return MultiheadAttention( - embed_dim, - num_attention_heads, - dropout=dropout, - self_attention=True, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - - def forward( - self, - x: torch.Tensor, - self_attn_mask: Optional[torch.Tensor] = None, - self_attn_padding_mask: Optional[torch.Tensor] = None, - ): - 
""" - LayerNorm is applied either before or after the self-attention/ffn - modules similar to the original Transformer implementation. - """ - residual = x - x, attn = self.self_attn( - query=x, - key=x, - value=x, - key_padding_mask=self_attn_padding_mask, - need_weights=False, - attn_mask=self_attn_mask, - ) - x = self.dropout_module(x) - x = residual + x - x = self.self_attn_layer_norm(x) - - residual = x - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - x = self.fc2(x) - x = self.dropout_module(x) - x = residual + x - x = self.final_layer_norm(x) - return x, attn diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/demo/src/App.tsx b/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/demo/src/App.tsx deleted file mode 100644 index a426553564b0652ba26ef39484ec67121809e939..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/demo/src/App.tsx +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. -// All rights reserved. - -// This source code is licensed under the license found in the -// LICENSE file in the root directory of this source tree. - -import { InferenceSession, Tensor } from "onnxruntime-web"; -import React, { useContext, useEffect, useState } from "react"; -import "./assets/scss/App.scss"; -import { handleImageScale } from "./components/helpers/scaleHelper"; -import { modelScaleProps } from "./components/helpers/Interfaces"; -import { onnxMaskToImage } from "./components/helpers/maskUtils"; -import { modelData } from "./components/helpers/onnxModelAPI"; -import Stage from "./components/Stage"; -import AppContext from "./components/hooks/createContext"; -const ort = require("onnxruntime-web"); -/* @ts-ignore */ -import npyjs from "npyjs"; - -// Define image, embedding and model paths -const IMAGE_PATH = "/assets/data/dogs.jpg"; -const IMAGE_EMBEDDING = "/assets/data/dogs_embedding.npy"; -const MODEL_DIR = "/model/sam_onnx_quantized_example.onnx"; - -const App = () => { - const { - clicks: [clicks], - image: [, setImage], - maskImg: [, setMaskImg], - } = useContext(AppContext)!; - const [model, setModel] = useState(null); // ONNX model - const [tensor, setTensor] = useState(null); // Image embedding tensor - - // The ONNX model expects the input to be rescaled to 1024. - // The modelScale state variable keeps track of the scale values. - const [modelScale, setModelScale] = useState(null); - - // Initialize the ONNX model. 
load the image, and load the SAM - // pre-computed image embedding - useEffect(() => { - // Initialize the ONNX model - const initModel = async () => { - try { - if (MODEL_DIR === undefined) return; - const URL: string = MODEL_DIR; - const model = await InferenceSession.create(URL); - setModel(model); - } catch (e) { - console.log(e); - } - }; - initModel(); - - // Load the image - const url = new URL(IMAGE_PATH, location.origin); - loadImage(url); - - // Load the Segment Anything pre-computed embedding - Promise.resolve(loadNpyTensor(IMAGE_EMBEDDING, "float32")).then( - (embedding) => setTensor(embedding) - ); - }, []); - - const loadImage = async (url: URL) => { - try { - const img = new Image(); - img.src = url.href; - img.onload = () => { - const { height, width, samScale } = handleImageScale(img); - setModelScale({ - height: height, // original image height - width: width, // original image width - samScale: samScale, // scaling factor for image which has been resized to longest side 1024 - }); - img.width = width; - img.height = height; - setImage(img); - }; - } catch (error) { - console.log(error); - } - }; - - // Decode a Numpy file into a tensor. - const loadNpyTensor = async (tensorFile: string, dType: string) => { - let npLoader = new npyjs(); - const npArray = await npLoader.load(tensorFile); - const tensor = new ort.Tensor(dType, npArray.data, npArray.shape); - return tensor; - }; - - // Run the ONNX model every time clicks has changed - useEffect(() => { - runONNX(); - }, [clicks]); - - const runONNX = async () => { - try { - if ( - model === null || - clicks === null || - tensor === null || - modelScale === null - ) - return; - else { - // Preapre the model input in the correct format for SAM. - // The modelData function is from onnxModelAPI.tsx. - const feeds = modelData({ - clicks, - tensor, - modelScale, - }); - if (feeds === undefined) return; - // Run the SAM ONNX model with the feeds returned from modelData() - const results = await model.run(feeds); - const output = results[model.outputNames[0]]; - // The predicted mask returned from the ONNX model is an array which is - // rendered as an HTML image using onnxMaskToImage() from maskUtils.tsx. 
- setMaskImg(onnxMaskToImage(output.data, output.dims[2], output.dims[3])); - } - } catch (e) { - console.log(e); - } - }; - - return ; -}; - -export default App; diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/__init__.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/inference.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/inference.py deleted file mode 100644 index d4cbf2a34af5fa4f19566675a7b1bb1ce53eb3f4..0000000000000000000000000000000000000000 --- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/inference.py +++ /dev/null @@ -1,326 +0,0 @@ -"""Inference for FastChat models.""" -import abc -from typing import Optional -import warnings - -import torch - -try: - from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, - LlamaTokenizer, - LlamaForCausalLM, - AutoModel, - AutoModelForSeq2SeqLM, - AutoConfig, - ) -except ImportError: - from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, - LLaMATokenizer, - LLamaForCausalLM, - AutoModel, - AutoModelForSeq2SeqLM, - AutoConfig, - ) - -from fastchat.conversation import ( - conv_templates, - get_default_conv_template, - compute_skip_echo_len, - SeparatorStyle, -) -from fastchat.serve.compression import load_compress_model -from fastchat.serve.monkey_patch_non_inplace import ( - replace_llama_attn_with_non_inplace_operations, -) -from fastchat.serve.serve_chatglm import chatglm_generate_stream - - -def raise_warning_for_old_weights(model_path, model): - if "vicuna" in model_path.lower(): - try: - is_vicuna = isinstance(model, LlamaForCausalLM) - except Exception: - is_vicuna = isinstance(model, LLamaForCausalLM) - if is_vicuna and model.model.vocab_size > 32000: - warnings.warn( - "\nYou are probably using the old Vicuna-v0 model, " - "which will generate unexpected results with the " - "current fschat.\nYou can try one of the following methods:\n" - "1. Upgrade your weights to the new Vicuna-v1.1: https://github.com/lm-sys/FastChat#vicuna-weights.\n" - "2. Use the old conversation template by `python3 -m fastchat.serve.cli --model-path /path/to/vicuna-v0 --conv-template conv_one_shot`\n" - "3. 
Downgrade fschat to fschat==0.1.10 (Not recommonded).\n" - ) - - -def get_gpu_memory(max_gpus=None): - gpu_memory = [] - num_gpus = ( - torch.cuda.device_count() - if max_gpus is None - else min(max_gpus, torch.cuda.device_count()) - ) - - for gpu_id in range(num_gpus): - with torch.cuda.device(gpu_id): - device = torch.cuda.current_device() - gpu_properties = torch.cuda.get_device_properties(device) - total_memory = gpu_properties.total_memory / (1024**3) - allocated_memory = torch.cuda.memory_allocated() / (1024**3) - available_memory = total_memory - allocated_memory - gpu_memory.append(available_memory) - return gpu_memory - - -def load_model( - model_path, device, num_gpus, max_gpu_memory=None, load_8bit=False, debug=False -): - if device == "cpu": - kwargs = {"torch_dtype": torch.float32} - elif device == "cuda": - kwargs = {"torch_dtype": torch.float16} - if num_gpus == "auto": - kwargs["device_map"] = "auto" - else: - num_gpus = int(num_gpus) - if num_gpus != 1: - kwargs["device_map"] = "auto" - if max_gpu_memory is None: - kwargs[ - "device_map" - ] = "sequential" # This is important for not the same VRAM sizes - available_gpu_memory = get_gpu_memory(num_gpus) - kwargs["max_memory"] = { - i: str(int(available_gpu_memory[i] * 0.85)) + "GiB" - for i in range(num_gpus) - } - else: - kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)} - print("init_kwargs", kwargs) - elif device == "mps": - kwargs = {"torch_dtype": torch.float16} - # Avoid bugs in mps backend by not using in-place operations. - replace_llama_attn_with_non_inplace_operations() - else: - raise ValueError(f"Invalid device: {device}") - - if load_8bit: - if num_gpus != 1 and num_gpus != "1": - warnings.warn("8-bit quantization is not supported for multi-gpu inference.") - else: - return load_compress_model(model_path=model_path, device=device, torch_dtype=kwargs["torch_dtype"]) - - if "chatglm" in model_path: - tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) - model = AutoModel.from_pretrained( - model_path, trust_remote_code=True, **kwargs - ).cuda() - elif "google/flan-t5" in model_path: - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) - model = AutoModelForSeq2SeqLM.from_pretrained( - model_path, low_cpu_mem_usage=True, **kwargs - ) - elif "dolly" in model_path: - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) - model = AutoModelForCausalLM.from_pretrained( - model_path, low_cpu_mem_usage=True, **kwargs - ) - # 50277 means "### End" - tokenizer.eos_token_id = 50277 - elif "pythia" in model_path or "stablelm" in model_path: - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) - model = AutoModelForCausalLM.from_pretrained( - model_path, low_cpu_mem_usage=True, **kwargs - ) - else: - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) - model = AutoModelForCausalLM.from_pretrained( - model_path, low_cpu_mem_usage=True, **kwargs - ) - raise_warning_for_old_weights(model_path, model) - - - if (device == "cuda" and num_gpus == 1) or device == "mps": - model.to(device) - - if debug: - print(model) - - return model, tokenizer - - -@torch.inference_mode() -def generate_stream( - model, tokenizer, params, device, context_len=2048, stream_interval=2 -): - prompt = params["prompt"] - l_prompt = len(prompt) - temperature = float(params.get("temperature", 1.0)) - max_new_tokens = int(params.get("max_new_tokens", 256)) - stop_str = params.get("stop", None) - stop_token_ids = params.get("stop_ids", 
[tokenizer.eos_token_id]) - - input_ids = tokenizer(prompt).input_ids - output_ids = list(input_ids) - - max_src_len = context_len - max_new_tokens - 8 - input_ids = input_ids[-max_src_len:] - - for i in range(max_new_tokens): - if i == 0: - if model.config.is_encoder_decoder: - encoder_outputs = model.encoder( - input_ids=torch.as_tensor([input_ids], device=device) - ) - out = model( - torch.as_tensor([input_ids], device=device), - decoder_input_ids=torch.as_tensor( - [[model.generation_config.decoder_start_token_id]], - device=device, - ), - encoder_outputs=encoder_outputs, - use_cache=True, - ) - logits = out.logits - past_key_values = out.past_key_values - else: - out = model(torch.as_tensor([input_ids], device=device), use_cache=True) - logits = out.logits - past_key_values = out.past_key_values - else: - if model.config.is_encoder_decoder: - out = model( - input_ids=torch.as_tensor([input_ids], device=device), - use_cache=True, - encoder_outputs=encoder_outputs, - decoder_input_ids=torch.as_tensor([[token]], device=device), - past_key_values=past_key_values, - ) - logits = out.logits - past_key_values = out.past_key_values - else: - out = model( - input_ids=torch.as_tensor([[token]], device=device), - use_cache=True, - past_key_values=past_key_values, - ) - logits = out.logits - past_key_values = out.past_key_values - - last_token_logits = logits[0][-1] - - if device == "mps": - # Switch to CPU by avoiding some bugs in mps backend. - last_token_logits = last_token_logits.float().to("cpu") - - if temperature < 1e-4: - token = int(torch.argmax(last_token_logits)) - else: - probs = torch.softmax(last_token_logits / temperature, dim=-1) - token = int(torch.multinomial(probs, num_samples=1)) - - output_ids.append(token) - - if token in stop_token_ids: - stopped = True - else: - stopped = False - - if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped: - output = tokenizer.decode(output_ids, skip_special_tokens=True) - if stop_str: - pos = output.rfind(stop_str, l_prompt) - if pos != -1: - output = output[:pos] - stopped = True - yield output - - if stopped: - break - - del past_key_values - - -class ChatIO(abc.ABC): - @abc.abstractmethod - def prompt_for_input(self, role: str) -> str: - """Prompt for input from a role.""" - - @abc.abstractmethod - def prompt_for_output(self, role: str): - """Prompt for output from a role.""" - - @abc.abstractmethod - def stream_output(self, output_stream, skip_echo_len: int): - """Stream output.""" - - -def chat_loop( - model_path: str, - device: str, - num_gpus: str, - max_gpu_memory: str, - load_8bit: bool, - conv_template: Optional[str], - temperature: float, - max_new_tokens: int, - chatio: ChatIO, - debug: bool, -): - # Model - model, tokenizer = load_model( - model_path, device, num_gpus, max_gpu_memory, load_8bit, debug - ) - is_chatglm = "chatglm" in str(type(model)).lower() - - # Chat - if conv_template: - conv = conv_templates[conv_template].copy() - else: - conv = get_default_conv_template(model_path).copy() - - while True: - try: - inp = chatio.prompt_for_input(conv.roles[0]) - except EOFError: - inp = "" - if not inp: - print("exit...") - break - - conv.append_message(conv.roles[0], inp) - conv.append_message(conv.roles[1], None) - - if is_chatglm: - prompt = conv.messages[conv.offset :] - generate_stream_func = chatglm_generate_stream - else: - generate_stream_func = generate_stream - prompt = conv.get_prompt() - - skip_echo_len = compute_skip_echo_len(model_path, conv, prompt) - stop_str = ( - conv.sep - if conv.sep_style in 
[SeparatorStyle.SINGLE, SeparatorStyle.BAIZE] - else None - ) - - params = { - "model": model_path, - "prompt": prompt, - "temperature": temperature, - "max_new_tokens": max_new_tokens, - "stop": stop_str, - } - - chatio.prompt_for_output(conv.roles[1]) - output_stream = generate_stream_func(model, tokenizer, params, device) - outputs = chatio.stream_output(output_stream, skip_echo_len) - # NOTE: strip is important to align with the training data. - conv.messages[-1][-1] = outputs.strip() - - if debug: - print("\n", {"prompt": prompt, "outputs": outputs}, "\n") diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/__init__.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/__init__.py deleted file mode 100644 index 397e85bea063e97fc4c12ad4d3e15669b69290bd..0000000000000000000000000000000000000000 --- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .upfirdn2d import upfirdn2d - -__all__ = ['upfirdn2d'] diff --git a/spaces/JavaFXpert/NimGPT-3.5/nim_gpt_functions.py b/spaces/JavaFXpert/NimGPT-3.5/nim_gpt_functions.py deleted file mode 100644 index 60aac859d9ef0bec0ab89e1fc777f5ae5a6c5b98..0000000000000000000000000000000000000000 --- a/spaces/JavaFXpert/NimGPT-3.5/nim_gpt_functions.py +++ /dev/null @@ -1,74 +0,0 @@ -from langchain import OpenAI -from langchain.prompts import PromptTemplate, FewShotPromptTemplate -from langchain.chains import LLMChain - -EXAMPLES_PROMPT_TEMPLATE = PromptTemplate( - input_variables=["input", "output"], - template="Input: {input}\nOutput: {output}" -) - -PLAN_MOVE_PROMPT_EXAMPLES = [ - {"input": "The piles contain 3, 5, 7 sticks", "output": "I'll take one stick from pile A"}, - {"input": "The piles contain 2, 5, 7 sticks", "output": "I'll take one stick from pile B"}, - {"input": "The piles contain 2, 5, 7 sticks", "output": "I'll take five stick from pile B"}, - {"input": "The piles contain 1, 2, 3 sticks", "output": "I'll take two sticks from pile C"}, - {"input": "The piles contain 0, 2, 3 sticks", "output": "I'll take one stick from pile C"}, - {"input": "The piles contain 0, 2, 0 sticks", "output": "I'll take two sticks from pile B"}, -] - -PLAN_MOVE_PROMPT_FROM_STRING_EXAMPLES = FewShotPromptTemplate( - examples=PLAN_MOVE_PROMPT_EXAMPLES, - example_prompt=EXAMPLES_PROMPT_TEMPLATE, - prefix="Nim is a two-player game of strategy in which players take turns removing objects from separate piles. " - "The goal of the game is to remove the last sticks from a pile when the other piles contain 0 sticks. Each " - "of these inputs represent a game state. For each of these game states please express a logical move that " - "consists of taking some number of sticks from a pile. " - "You may not take any sticks from a pile that contains 0 sticks. " - "You may not take more sticks from a pile than it contains. " - "You may only take sticks from one pile. 
", - suffix="Input: {text_game_state}\nOutput:", - input_variables=["text_game_state"], - example_separator="\n\n" -) - -EXEC_MOVE_PROMPT_EXAMPLES = [ - {"input": "I'll take two sticks from pile A", "output": "0,2"}, - {"input": "I'll take 3 sticks from the first pile", "output": "0,3"}, - {"input": "I'll take two sticks from pile C", "output": "2,2"}, - {"input": "I'll take one stick from the third pile", "output": "2,1"}, - {"input": "From pile B remove 2 sticks", "output": "1,2"}, - {"input": "I'll take the last stick from pile C", "output": "2,1"}, -] - -EXEC_MOVE_PROMPT_FROM_STRING_EXAMPLES = FewShotPromptTemplate( - examples=EXEC_MOVE_PROMPT_EXAMPLES, - example_prompt=EXAMPLES_PROMPT_TEMPLATE, - prefix="Express every input as two numbers separated by a comma, where the first number is the zero index pile " - "number and the second number is the number of sticks to remove.", - suffix="Input: {move_to_express}\nOutput:", - input_variables=["move_to_express"], - example_separator="\n\n" -) - - -def plan_move(text_game_state, temperature, api_key): - llm = OpenAI(model_name='text-davinci-003', temperature=temperature, max_tokens=100, - openai_api_key=api_key) - llm_chain = LLMChain(llm=llm, prompt=PLAN_MOVE_PROMPT_FROM_STRING_EXAMPLES, verbose=False) - planned_move = llm_chain.run({'text_game_state': text_game_state}).strip() - return planned_move - - -def execute_move(move_to_express, nim_game_env, api_key): - llm = OpenAI(model_name='text-davinci-003', temperature=0.0, max_tokens=10, - openai_api_key=api_key) - llm_chain = LLMChain(llm=llm, prompt=EXEC_MOVE_PROMPT_FROM_STRING_EXAMPLES, verbose=False) - step_tuple_str = llm_chain.run({'move_to_express': move_to_express}) - step_tuple = tuple(int(x) for x in step_tuple_str.split(',')) - try: - step_result = nim_game_env.step(step_tuple) - except ValueError: - return "Invalid move!", [0, 0, 0], 0, True, None - - text_observation = "The piles contain " + ", ".join(str(x) for x in step_result[0]) + " sticks." 
- return text_observation, step_result[0], step_result[1], step_result[2], step_result[3] diff --git a/spaces/JavierIA/gccopen/utils/plots.py b/spaces/JavierIA/gccopen/utils/plots.py deleted file mode 100644 index fdd8d0e853deb228badeeed52fbbe5fb8eb10632..0000000000000000000000000000000000000000 --- a/spaces/JavierIA/gccopen/utils/plots.py +++ /dev/null @@ -1,489 +0,0 @@ -# Plotting utils - -import glob -import math -import os -import random -from copy import copy -from pathlib import Path - -import cv2 -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import torch -import yaml -from PIL import Image, ImageDraw, ImageFont -from scipy.signal import butter, filtfilt - -from utils.general import xywh2xyxy, xyxy2xywh -from utils.metrics import fitness - -# Settings -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only - - -def color_list(): - # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb - def hex2rgb(h): - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def plot_one_box(x, img, color=None, label=None, line_thickness=3): - # Plots one bounding box on image img - tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness - color = color or [random.randint(0, 255) for _ in range(3)] - c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) - cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) - if label: - tf = max(tl - 1, 1) # font thickness - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 - cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) - - -def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): - img = Image.fromarray(img) - draw = ImageDraw.Draw(img) - line_thickness = line_thickness or max(int(min(img.size) / 200), 2) - draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot - if label: - fontsize = max(round(max(img.size) / 40), 12) - font = ImageFont.truetype("Arial.ttf", fontsize) - txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) - draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) - return np.asarray(img) - - -def plot_wh_methods(): # from utils.plots import *; 
plot_wh_methods() - # Compares the two methods for width-height anchor multiplication - # https://github.com/ultralytics/yolov3/issues/168 - x = np.arange(-4.0, 4.0, .1) - ya = np.exp(x) - yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 - - fig = plt.figure(figsize=(6, 3), tight_layout=True) - plt.plot(x, ya, '.-', label='YOLOv3') - plt.plot(x, yb ** 2, '.-', label='YOLOR ^2') - plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6') - plt.xlim(left=-4, right=4) - plt.ylim(bottom=0, top=6) - plt.xlabel('input') - plt.ylabel('output') - plt.grid() - plt.legend() - fig.savefig('comparison.png', dpi=200) - - -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - targets = [] - for i, o in enumerate(output): - for *box, conf, cls in o.cpu().numpy(): - targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) - return np.array(targets) - - -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): - # Plot image grid with labels - - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - - # un-normalise - if np.max(images[0]) <= 1: - images *= 255 - - tl = 3 # line thickness - tf = max(tl - 1, 1) # font thickness - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - - # Check if we should resize - scale_factor = max_size / max(h, w) - if scale_factor < 1: - h = math.ceil(scale_factor * h) - w = math.ceil(scale_factor * w) - - colors = color_list() # list of colors - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, img in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - - block_x = int(w * (i // ns)) - block_y = int(h * (i % ns)) - - img = img.transpose(1, 2, 0) - if scale_factor < 1: - img = cv2.resize(img, (w, h)) - - mosaic[block_y:block_y + h, block_x:block_x + w, :] = img - if len(targets) > 0: - image_targets = targets[targets[:, 0] == i] - boxes = xywh2xyxy(image_targets[:, 2:6]).T - classes = image_targets[:, 1].astype('int') - labels = image_targets.shape[1] == 6 # labels if no conf column - conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale_factor < 1: # absolute coords need scale if image scales - boxes *= scale_factor - boxes[[0, 2]] += block_x - boxes[[1, 3]] += block_y - for j, box in enumerate(boxes.T): - cls = int(classes[j]) - color = colors[cls % len(colors)] - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) - plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) - - # Draw image filename labels - if paths: - label = Path(paths[i]).name[:40] # trim to 40 char - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, - lineType=cv2.LINE_AA) - - # Image border - cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) - - if fname: - r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size - mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) - # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save - Image.fromarray(mosaic).save(fname) # PIL save - return mosaic - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - plt.close() - - -def plot_test_txt(): # from utils.plots import *; plot_test() - # Plot test.txt histograms - x = np.loadtxt('test.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() - # Plot study.txt generated by test.py - fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) - # ax = ax.ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]: - for f in sorted(Path(path).glob('study*.txt')): - y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] - # for i in range(7): - # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - # ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') - - ax2.grid(alpha=0.2) - ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 57) - ax2.set_ylim(30, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - plt.savefig(str(Path(path).name) + '.png', dpi=300) - - -def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): - # plot dataset labels - print('Plotting labels... 
') - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - colors = color_list() - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - - # seaborn correlogram - sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) - plt.close() - - # matplotlib labels - matplotlib.use('svg') # faster - ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - ax[0].set_ylabel('instances') - if 0 < len(names) < 30: - ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(names, rotation=90, fontsize=10) - else: - ax[0].set_xlabel('classes') - sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) - - # rectangles - labels[:, 1:3] = 0.5 # center - labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 - img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) - for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot - ax[1].imshow(img) - ax[1].axis('off') - - for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: - ax[a].spines[s].set_visible(False) - - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') - plt.close() - - # loggers - for k, v in loggers.items() or {}: - if k == 'wandb' and v: - v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) - - -def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() - # Plot hyperparameter evolution results in evolve.txt - with open(yaml_file) as f: - hyp = yaml.load(f, Loader=yaml.SafeLoader) - x = np.loadtxt('evolve.txt', ndmin=2) - f = fitness(x) - # weights = (f - f.min()) ** 2 # for weighted results - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - for i, (k, v) in enumerate(hyp.items()): - y = x[:, i + 7] - # mu = (y * weights).sum() / weights.sum() # best weighted result - mu = y[f.argmax()] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print('%15s: %.3g' % (k, mu)) - plt.savefig('evolve.png', dpi=200) - print('\nPlot saved as evolve.png') - - -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - -def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() - # Plot training 'results*.txt', overlaying train and val losses - s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends - t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles - for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) - ax = ax.ravel() - for i in range(5): - for j in [i, i + 5]: - y = results[j, x] - ax[i].plot(x, y, marker='.', label=s[j]) - # y_smooth = butter_lowpass_filtfilt(y) - # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) - - ax[i].set_title(t[i]) - ax[i].legend() - ax[i].set_ylabel(f) if i == 0 else None # add filename - fig.savefig(f.replace('.txt', '.png'), dpi=200) - - -def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): - # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', - 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] - if bucket: - # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] - files = ['results%g.txt' % x for x in id] - c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) - os.system(c) - else: - files = list(Path(save_dir).glob('results*.txt')) - assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - for i in range(10): - y = results[i, x] - if i in [0, 1, 2, 5, 6, 7]: - y[y == 0] = np.nan # don't show zero loss values - # y /= y[0] # normalize - label = labels[fi] if len(labels) else f.stem - ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) - ax[i].set_title(s[i]) - # if i in [5, 6, 7]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - - ax[1].legend() - fig.savefig(Path(save_dir) / 'results.png', dpi=200) - - -def output_to_keypoint(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - targets = [] - for i, o in enumerate(output): - kpts = o[:,6:] - o = o[:,:6] - for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()): - targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])]) - return np.array(targets) - - -def plot_skeleton_kpts(im, kpts, steps, orig_shape=None): - #Plot the skeleton and keypointsfor coco datatset - palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], - [230, 230, 0], [255, 153, 255], [153, 204, 255], - [255, 102, 255], [255, 51, 255], [102, 178, 255], - [51, 153, 255], [255, 153, 153], [255, 102, 102], - [255, 51, 51], [153, 255, 153], [102, 255, 102], - [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], - [255, 255, 255]]) - - skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], - [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], - [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] - - pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] - pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] - radius = 5 - num_kpts = len(kpts) // steps - - for kid in range(num_kpts): - r, g, b = pose_kpt_color[kid] - x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1] - if not (x_coord % 640 == 0 or y_coord % 640 == 0): - if steps == 3: - conf = kpts[steps * kid + 2] - if conf < 0.5: - continue - cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1) - - for sk_id, sk in enumerate(skeleton): - r, g, b = pose_limb_color[sk_id] - pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1])) - pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1])) - if steps == 3: - conf1 = kpts[(sk[0]-1)*steps+2] - conf2 = kpts[(sk[1]-1)*steps+2] - if conf1<0.5 or conf2<0.5: - continue - if pos1[0]%640 == 0 or pos1[1]%640==0 or pos1[0]<0 or pos1[1]<0: - continue - if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0]<0 or pos2[1]<0: - continue - cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2) diff --git a/spaces/JeffJing/ZookChatBot/steamship/invocable/invocable_response.py b/spaces/JeffJing/ZookChatBot/steamship/invocable/invocable_response.py deleted file mode 100644 index fad7d90c82bd15c51372267c44f88c8349c9d612..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/invocable/invocable_response.py +++ /dev/null @@ -1,231 +0,0 @@ -from __future__ import annotations - -import io -import json -import logging -from typing import Any, Dict, Generic, Optional, TypeVar, Union - -from pydantic import BaseModel -from 
pydantic.generics import GenericModel - -from steamship.base import MimeTypes, SteamshipError, Task, TaskState -from steamship.base.client import Client -from steamship.base.error import DEFAULT_ERROR_MESSAGE -from steamship.base.mime_types import ContentEncodings -from steamship.base.model import CamelModel -from steamship.utils.binary_utils import flexi_create - - -class Http(CamelModel): - status: int = None - # If true, we're signaling to the Steamship Proxy that the `data` field of the SteamshipResponse object - # has been wrapped in base64. In this situation, we can return the bytes within directly to the Proxy - # caller without interpreting it. - base64_wrapped: bool = None - headers: Dict[str, str] = None - - -T = TypeVar("T") - - -class InvocableResponse(GenericModel, Generic[T]): - """Mirrors the Response object in the Steamship server.""" - - data: T = None # Data for successful or synchronous requests. - status: Task = None # Reporting for errors and async status - http: Http = None # Additional HTTP information for Steamship Proxy (headers, etc) - - def __init__( - self, - status: Task = None, - error: SteamshipError = None, - http: Http = None, - data: Any = None, - string: str = None, - json: Any = None, - _bytes: Union[bytes, io.BytesIO] = None, - mime_type=None, - ): - super().__init__() - # Note: - # This function has to be very defensively coded since Any errors thrown here will not be returned - # to the end-user via our proxy (as this is the constructor for the response itself!) - if http is not None: - self.http = http - else: - self.http = Http(status=200, headers={}) - - try: - self.set_data(data=data, string=string, json=json, _bytes=_bytes, mime_type=mime_type) - except Exception as ex: - logging.error("Exception within Response.__init__.", exc_info=ex) - if error is not None: - if error.message: - error.message = f"{error.message}. Also found error - unable to serialize data to response. {ex}" - else: - error.message = f"Unable to serialize data to response. {ex}" - else: - error = SteamshipError(message=f"Unable to serialize data to response. {ex}") - logging.error(error, exc_info=error) - - # Handle the task provided - if status is None: - self.status = Task() - elif isinstance(status, Task): - self.status = status - else: - self.status = Task() - self.status.state = TaskState.failed - self.status.status_message = ( - f"Status field of response should be of type Task. " - f"Instead was of type {type(status)} and had value {status}." 
- ) - - if error: - self.status.state = TaskState.failed - self.status.status_message = error.message - self.status.status_suggestion = error.suggestion - self.status.status_code = error.code - logging.error( - "steamship.invocable.response - Response created with error.", exc_info=error - ) - else: - if self.status.state is None: - self.status.state = TaskState.succeeded - - def set_data( - self, - data: Any = None, - string: str = None, - json: Any = None, - _bytes: Union[bytes, io.BytesIO] = None, - mime_type=None, - ): - data, mime_type, encoding = flexi_create( - data=data, string=string, json=json, _bytes=_bytes, mime_type=mime_type - ) - - self.data = data - - self.http.headers = self.http.headers or {} - self.http.headers["Content-Type"] = mime_type or MimeTypes.BINARY - - if encoding == ContentEncodings.BASE64: - self.http.base64_wrapped = True - - @staticmethod - def error( - code: int, - message: Optional[str] = None, - error: Optional[SteamshipError] = None, - exception: Optional[Exception] = None, - prefix: Optional[str] = None, - ) -> InvocableResponse[T]: - """Merges a number of error channels into one unified Response object. - - Aggregates all possible messages into a single " | "-delimeted error message. - - If the final resulting error message is non-null, prefixes with the provided `prefix` - """ - # Use or create the return error - error = error or SteamshipError() - - messages = [] - if error.message != DEFAULT_ERROR_MESSAGE: - messages.append(error.message) - - # Set or append the additional message - if message is not None and message not in messages: - messages.append(message) - - # Set or append the exception - if exception is not None: - exception_str = f"{exception}" - if exception_str not in messages: - messages.append(exception_str) - - messages = [m.strip() for m in messages if m is not None and len(m.strip())] - if len(messages) > 0: - error.message = " | ".join(messages) - - # Finally, add the prefix if requested. - if prefix and error.message: - error.message = f"{prefix}{error.message}" - - return InvocableResponse(error=error, http=Http(status=code)) - - @staticmethod - def from_obj(obj: Any) -> InvocableResponse: # noqa: C901 - if obj is None: - return InvocableResponse.error(500, "Handler provided no response.") - - if isinstance(obj, InvocableResponse): - return obj - elif isinstance(obj, SteamshipError): - return InvocableResponse.error(500, error=obj) - elif isinstance(obj, Exception): - return InvocableResponse.error(500, error=SteamshipError(error=obj)) - elif isinstance(obj, io.BytesIO): - return InvocableResponse(_bytes=obj) - elif isinstance(obj, dict): - return InvocableResponse(json=obj) - elif isinstance(obj, list): - return InvocableResponse(json=obj) - elif isinstance(obj, str): - return InvocableResponse(string=obj) - elif isinstance(obj, (float, int, bool)): - return InvocableResponse(json=obj) - elif isinstance(obj, CamelModel): - return InvocableResponse(json=obj.dict(by_alias=True)) - elif isinstance(obj, BaseModel): - return InvocableResponse(json=obj.dict()) - - return InvocableResponse.error( - 500, message=f"Handler provided unknown response type: {type(obj)}" - ) - - def post_update(self, client: Client): - """Pushes this response object to the corresponding Task on the Steamship Engine. - - Typically apps and plugins return their results to the Engine synchronously via HTTP. 
- But sometimes that's not practice -- for example: - - - Microsoft's OCR endpoint returns a Job Token that can be exchanged for updates, and eventually a result. - - Google's AutoML can take 20-30 minutes to train. - - Fine-tuning BERT on ECS can take an arbitrarily long amount of time. - - In these cases, it can be useful for the package/plugin to occasionally post updates to the Engine outside - of the Engine's initial synchronous request-response conversation. - """ - if self.status is None or self.status.task_id is None: - raise SteamshipError( - message="An App/Plugin response can only be pushed to the Steamship Engine if " - + "it is associated with a Task. Please set the `status.task_id` field." - ) - if client is None: - raise SteamshipError( - message="Unable to push Response to Steamship: Associated client is None" - ) - - # Create a task object - task = Task(client=client, task_id=self.status.task_id) - update_fields = set() - - if self.status.state is not None: - task.state = self.status.state - update_fields.add("state") - - if self.status.status_message is not None: - task.status_message = self.status.status_message - update_fields.add("status_message") - - if self.status.status_suggestion is not None: - task.status_suggestion = self.status.status_suggestion - update_fields.add("status_suggestion") - - if self.data is not None: - # This object itself should always be the output of the Training Task object. - task.output = json.dumps(self.data) - update_fields.add("output") - - task.post_update(fields=update_fields) diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/assets/html/appearance_switcher.html b/spaces/JohnSmith9982/ChuanhuChatGPT/assets/html/appearance_switcher.html deleted file mode 100644 index 9375071fbdfda7bfd622d7f7bd2dfdd0c494341b..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT/assets/html/appearance_switcher.html +++ /dev/null @@ -1,11 +0,0 @@ -
- - {label} - - - - -
diff --git a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/losses.py b/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/version.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/version.py deleted file mode 100644 index ed6b02e21b2774da461e61606451eada5ebc9e18..0000000000000000000000000000000000000000 --- a/spaces/JohnnyPittt/audio-styling/deepafx_st/version.py +++ /dev/null @@ -1,6 +0,0 @@ -# !/usr/bin/env python -# -*- coding: utf-8 -*- -'''Version info''' - -short_version = '0.0' -version = '0.0.1' diff --git a/spaces/JunghunleePhD/testfordocker/setup-docker.bash b/spaces/JunghunleePhD/testfordocker/setup-docker.bash deleted file mode 100644 index 9498b0f4e9928affd740faafd8099c35c5e6defe..0000000000000000000000000000000000000000 --- a/spaces/JunghunleePhD/testfordocker/setup-docker.bash +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -NAME="testfordocker" -PORT=7860 -APP_PATH="/app" - -## 1. stop old container -docker stop $NAME - -## 2. remove old container and images -docker rm $NAME && docker rmi $NAME - -## 3. build new image -docker build -t $NAME . - -## 4. 
run container -docker run -itd --name $NAME -p $PORT:7860 -v "$(pwd)/app":$APP_PATH $NAME - -echo "CONTAINER IS RUNNING AS $NAME ON $PORT" \ No newline at end of file diff --git a/spaces/Kay2048/IKay/upcunet_v3.py b/spaces/Kay2048/IKay/upcunet_v3.py deleted file mode 100644 index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000 --- a/spaces/Kay2048/IKay/upcunet_v3.py +++ /dev/null @@ -1,714 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F -import os, sys -import numpy as np - -root_path = os.path.abspath('.') -sys.path.append(root_path) - - -class SEBlock(nn.Module): - def __init__(self, in_channels, reduction=8, bias=False): - super(SEBlock, self).__init__() - self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias) - self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias) - - def forward(self, x): - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half() - else: - x0 = torch.mean(x, dim=(2, 3), keepdim=True) - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - def forward_mean(self, x, x0): - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - -class UNetConv(nn.Module): - def __init__(self, in_channels, mid_channels, out_channels, se): - super(UNetConv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - nn.Conv2d(mid_channels, out_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - ) - if se: - self.seblock = SEBlock(out_channels, reduction=8, bias=True) - else: - self.seblock = None - - def forward(self, x): - z = self.conv(x) - if self.seblock is not None: - z = self.seblock(z) - return z - - -class UNet1(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet1x3(nn.Module): 
- def __init__(self, in_channels, out_channels, deconv): - super(UNet1x3, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet2(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet2, self).__init__() - - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 64, 128, se=True) - self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0) - self.conv3 = UNetConv(128, 256, 128, se=True) - self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0) - self.conv4 = UNetConv(128, 64, 64, se=True) - self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv5 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3(x3) - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4(x2 + x3) - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - def forward_a(self, x): # conv234结尾有se - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x2): # conv234结尾有se - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3.conv(x3) - return x3 - - def forward_c(self, x2, x3): # conv234结尾有se - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = 
F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4.conv(x2 + x3) - return x4 - - def forward_d(self, x1, x4): # conv234结尾有se - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - -class UpCunet2x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet2x, self).__init__() - self.unet1 = UNet1(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 36, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - 
opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 36, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2] - return res # - - -class UpCunet3x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet3x, self).__init__() - self.unet1 = UNet1x3(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 4 + 1) * 4 - pw = ((w0 - 1) // 4 + 1) * 4 - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_h = (h0 - 1) // 4 * 4 + 4 # 能被4整除 - else: - crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_w = (w0 - 1) // 4 * 4 + 4 # 能被4整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') - n, c, h, w = 
x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 28, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 28, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop # - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j] - del opt_res_dict - 
torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3] - return res - - -class UpCunet4x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet4x, self).__init__() - self.unet1 = UNet1(in_channels, 64, deconv=True) - self.unet2 = UNet2(64, 64, deconv=False) - self.ps = nn.PixelShuffle(2) - self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True) - - def forward(self, x, tile_mode): - n, c, h0, w0 = x.shape - x00 = x - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - x = self.conv_final(x) - x = F.pad(x, (-1, -1, -1, -1)) - x = self.ps(x) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4] - x += F.interpolate(x00, scale_factor=4, mode='nearest') - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 38, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = 
tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 38, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - x_crop = self.conv_final(x_crop) - x_crop = F.pad(x_crop, (-1, -1, -1, -1)) - x_crop = self.ps(x_crop) - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape) - res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4] - res += F.interpolate(x00, scale_factor=4, mode='nearest') - return res # - - -class RealWaifuUpScaler(object): - def __init__(self, scale, weight_path, half, device): - weight = torch.load(weight_path, map_location="cpu") - self.model = eval("UpCunet%sx" % scale)() - if (half == True): - self.model = self.model.half().to(device) - else: - self.model = self.model.to(device) - self.model.load_state_dict(weight, strict=True) - self.model.eval() - self.half = half - self.device = device - - def np2tensor(self, np_frame): - if (self.half == False): - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255 - else: - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255 - - def tensor2np(self, tensor): - if (self.half == False): - return ( - np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0))) - else: - return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), - (1, 2, 0))) - - def __call__(self, frame, tile_mode): - with torch.no_grad(): - tensor = self.np2tensor(frame) - result = self.tensor2np(self.model(tensor, tile_mode)) - return result - - -if __name__ == "__main__": - ###########inference_img - import time, cv2, sys - from time import time as ttime - - for weight_path, scale in 
[("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3), - ("weights_v3/up4x-latest-denoise3x.pth", 4)]: - for tile_mode in [0, 1, 2, 3, 4]: - upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0") - input_dir = "%s/input_dir1" % root_path - output_dir = "%s/opt-dir-all-test" % root_path - os.makedirs(output_dir, exist_ok=True) - for name in os.listdir(input_dir): - print(name) - tmp = name.split(".") - inp_path = os.path.join(input_dir, name) - suffix = tmp[-1] - prefix = ".".join(tmp[:-1]) - tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - print(inp_path, tmp_path) - # 支持中文路径 - # os.link(inp_path, tmp_path)#win用硬链接 - os.symlink(inp_path, tmp_path) # linux用软链接 - frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]] - t0 = ttime() - result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1] - t1 = ttime() - print(prefix, "done", t1 - t0) - tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - cv2.imwrite(tmp_opt_path, result) - n = 0 - while (1): - if (n == 0): - suffix = "_%sx_tile%s.png" % (scale, tile_mode) - else: - suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) # - if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False): - break - else: - n += 1 - final_opt_path = os.path.join(output_dir, prefix + suffix) - os.rename(tmp_opt_path, final_opt_path) - os.remove(tmp_path) diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/main.py b/spaces/Kayson/InstructDiffusion/stable_diffusion/main.py deleted file mode 100644 index 193c50a86a307bd69f52a0c3b89fb5368ed9a222..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/stable_diffusion/main.py +++ /dev/null @@ -1,744 +0,0 @@ -import argparse, os, sys, datetime, glob, importlib, csv -import numpy as np -import time -import torch -import torchvision -import pytorch_lightning as pl - -from packaging import version -from omegaconf import OmegaConf -from torch.utils.data import random_split, DataLoader, Dataset, Subset -from functools import partial -from PIL import Image - -from pytorch_lightning import seed_everything -from pytorch_lightning.trainer import Trainer -from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor -from pytorch_lightning.utilities.distributed import rank_zero_only -from pytorch_lightning.utilities import rank_zero_info - -from ldm.data.base import Txt2ImgIterableBaseDataset -from ldm.util import instantiate_from_config - - -def get_parser(**parser_kwargs): - def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ("yes", "true", "t", "y", "1"): - return True - elif v.lower() in ("no", "false", "f", "n", "0"): - return False - else: - raise argparse.ArgumentTypeError("Boolean value expected.") - - parser = argparse.ArgumentParser(**parser_kwargs) - parser.add_argument( - "-n", - "--name", - type=str, - const=True, - default="", - nargs="?", - help="postfix for logdir", - ) - parser.add_argument( - "-r", - "--resume", - type=str, - const=True, - default="", - nargs="?", - help="resume from logdir or checkpoint in logdir", - ) - parser.add_argument( - "-b", - "--base", - nargs="*", - metavar="base_config.yaml", - help="paths to base configs. Loaded from left-to-right. 
" - "Parameters can be overwritten or added with command-line options of the form `--key value`.", - default=list(), - ) - parser.add_argument( - "-t", - "--train", - type=str2bool, - const=True, - default=False, - nargs="?", - help="train", - ) - parser.add_argument( - "--no-test", - type=str2bool, - const=True, - default=False, - nargs="?", - help="disable test", - ) - parser.add_argument( - "-p", - "--project", - help="name of new or path to existing project" - ) - parser.add_argument( - "-d", - "--debug", - type=str2bool, - nargs="?", - const=True, - default=False, - help="enable post-mortem debugging", - ) - parser.add_argument( - "-s", - "--seed", - type=int, - default=23, - help="seed for seed_everything", - ) - parser.add_argument( - "-f", - "--postfix", - type=str, - default="", - help="post-postfix for default name", - ) - parser.add_argument( - "-l", - "--logdir", - type=str, - default="logs", - help="directory for logging dat shit", - ) - parser.add_argument( - "--scale_lr", - type=str2bool, - nargs="?", - const=True, - default=True, - help="scale base-lr by ngpu * batch_size * n_accumulate", - ) - return parser - - -def nondefault_trainer_args(opt): - parser = argparse.ArgumentParser() - parser = Trainer.add_argparse_args(parser) - args = parser.parse_args([]) - return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k)) - - -class WrappedDataset(Dataset): - """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset""" - - def __init__(self, dataset): - self.data = dataset - - def __len__(self): - return len(self.data) - - def __getitem__(self, idx): - return self.data[idx] - - -def worker_init_fn(_): - worker_info = torch.utils.data.get_worker_info() - - dataset = worker_info.dataset - worker_id = worker_info.id - - if isinstance(dataset, Txt2ImgIterableBaseDataset): - split_size = dataset.num_records // worker_info.num_workers - # reset num_records to the true number to retain reliable length information - dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size] - current_id = np.random.choice(len(np.random.get_state()[1]), 1) - return np.random.seed(np.random.get_state()[1][current_id] + worker_id) - else: - return np.random.seed(np.random.get_state()[1][0] + worker_id) - - -class DataModuleFromConfig(pl.LightningDataModule): - def __init__(self, batch_size, train=None, validation=None, test=None, predict=None, - wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False, - shuffle_val_dataloader=False): - super().__init__() - self.batch_size = batch_size - self.dataset_configs = dict() - self.num_workers = num_workers if num_workers is not None else batch_size * 2 - self.use_worker_init_fn = use_worker_init_fn - if train is not None: - self.dataset_configs["train"] = train - self.train_dataloader = self._train_dataloader - if validation is not None: - self.dataset_configs["validation"] = validation - self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader) - if test is not None: - self.dataset_configs["test"] = test - self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader) - if predict is not None: - self.dataset_configs["predict"] = predict - self.predict_dataloader = self._predict_dataloader - self.wrap = wrap - - def prepare_data(self): - for data_cfg in self.dataset_configs.values(): - instantiate_from_config(data_cfg) - - def setup(self, stage=None): - self.datasets = dict( - (k, 
instantiate_from_config(self.dataset_configs[k])) - for k in self.dataset_configs) - if self.wrap: - for k in self.datasets: - self.datasets[k] = WrappedDataset(self.datasets[k]) - - def _train_dataloader(self): - is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset) - if is_iterable_dataset or self.use_worker_init_fn: - init_fn = worker_init_fn - else: - init_fn = None - return DataLoader(self.datasets["train"], batch_size=self.batch_size, - num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True, - worker_init_fn=init_fn) - - def _val_dataloader(self, shuffle=False): - if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn: - init_fn = worker_init_fn - else: - init_fn = None - return DataLoader(self.datasets["validation"], - batch_size=self.batch_size, - num_workers=self.num_workers, - worker_init_fn=init_fn, - shuffle=shuffle) - - def _test_dataloader(self, shuffle=False): - is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset) - if is_iterable_dataset or self.use_worker_init_fn: - init_fn = worker_init_fn - else: - init_fn = None - - # do not shuffle dataloader for iterable dataset - shuffle = shuffle and (not is_iterable_dataset) - - return DataLoader(self.datasets["test"], batch_size=self.batch_size, - num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle) - - def _predict_dataloader(self, shuffle=False): - if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn: - init_fn = worker_init_fn - else: - init_fn = None - return DataLoader(self.datasets["predict"], batch_size=self.batch_size, - num_workers=self.num_workers, worker_init_fn=init_fn) - - -class SetupCallback(Callback): - def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config): - super().__init__() - self.resume = resume - self.now = now - self.logdir = logdir - self.ckptdir = ckptdir - self.cfgdir = cfgdir - self.config = config - self.lightning_config = lightning_config - - def on_keyboard_interrupt(self, trainer, pl_module): - if trainer.global_rank == 0: - print("Summoning checkpoint.") - ckpt_path = os.path.join(self.ckptdir, "last.ckpt") - trainer.save_checkpoint(ckpt_path) - - def on_pretrain_routine_start(self, trainer, pl_module): - if trainer.global_rank == 0: - # Create logdirs and save configs - os.makedirs(self.logdir, exist_ok=True) - os.makedirs(self.ckptdir, exist_ok=True) - os.makedirs(self.cfgdir, exist_ok=True) - - if "callbacks" in self.lightning_config: - if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']: - os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True) - print("Project config") - print(OmegaConf.to_yaml(self.config)) - OmegaConf.save(self.config, - os.path.join(self.cfgdir, "{}-project.yaml".format(self.now))) - - print("Lightning config") - print(OmegaConf.to_yaml(self.lightning_config)) - OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}), - os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now))) - - else: - # ModelCheckpoint callback created log directory --- remove it - if not self.resume and os.path.exists(self.logdir): - dst, name = os.path.split(self.logdir) - dst = os.path.join(dst, "child_runs", name) - os.makedirs(os.path.split(dst)[0], exist_ok=True) - try: - os.rename(self.logdir, dst) - except FileNotFoundError: - pass - - -class ImageLogger(Callback): - def __init__(self, batch_frequency, max_images, 
clamp=True, increase_log_steps=True, - rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, - log_images_kwargs=None): - super().__init__() - self.rescale = rescale - self.batch_freq = batch_frequency - self.max_images = max_images - self.logger_log_images = { - pl.loggers.TestTubeLogger: self._testtube, - } - self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)] - if not increase_log_steps: - self.log_steps = [self.batch_freq] - self.clamp = clamp - self.disabled = disabled - self.log_on_batch_idx = log_on_batch_idx - self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {} - self.log_first_step = log_first_step - - @rank_zero_only - def _testtube(self, pl_module, images, batch_idx, split): - for k in images: - grid = torchvision.utils.make_grid(images[k]) - grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w - - tag = f"{split}/{k}" - pl_module.logger.experiment.add_image( - tag, grid, - global_step=pl_module.global_step) - - @rank_zero_only - def log_local(self, save_dir, split, images, - global_step, current_epoch, batch_idx): - root = os.path.join(save_dir, "images", split) - for k in images: - grid = torchvision.utils.make_grid(images[k], nrow=4) - if self.rescale: - grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w - grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1) - grid = grid.numpy() - grid = (grid * 255).astype(np.uint8) - filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format( - k, - global_step, - current_epoch, - batch_idx) - path = os.path.join(root, filename) - os.makedirs(os.path.split(path)[0], exist_ok=True) - Image.fromarray(grid).save(path) - - def log_img(self, pl_module, batch, batch_idx, split="train"): - check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step - if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0 - hasattr(pl_module, "log_images") and - callable(pl_module.log_images) and - self.max_images > 0): - logger = type(pl_module.logger) - - is_train = pl_module.training - if is_train: - pl_module.eval() - - with torch.no_grad(): - images = pl_module.log_images(batch, split=split, **self.log_images_kwargs) - - for k in images: - N = min(images[k].shape[0], self.max_images) - images[k] = images[k][:N] - if isinstance(images[k], torch.Tensor): - images[k] = images[k].detach().cpu() - if self.clamp: - images[k] = torch.clamp(images[k], -1., 1.) 
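Editor's note: for reference, the tensor-to-PNG conversion that `log_local` above performs (grid assembly, rescale from [-1, 1] to [0, 1], channel reordering) boils down to the following self-contained sketch; the function name, file path and `nrow` value are arbitrary choices here.

import numpy as np
import torchvision
from PIL import Image

def save_grid(images, path, rescale=True):
    # images: float tensor (N, C, H, W) in [-1, 1], as produced by log_images
    grid = torchvision.utils.make_grid(images, nrow=4)
    if rescale:
        grid = (grid + 1.0) / 2.0                 # [-1, 1] -> [0, 1]
    grid = grid.permute(1, 2, 0).cpu().numpy()    # C,H,W -> H,W,C
    Image.fromarray((grid * 255).astype(np.uint8)).save(path)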
- - self.log_local(pl_module.logger.save_dir, split, images, - pl_module.global_step, pl_module.current_epoch, batch_idx) - - logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None) - logger_log_images(pl_module, images, pl_module.global_step, split) - - if is_train: - pl_module.train() - - def check_frequency(self, check_idx): - if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and ( - check_idx > 0 or self.log_first_step): - try: - self.log_steps.pop(0) - except IndexError as e: - print(e) - pass - return True - return False - - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): - if not self.disabled and (pl_module.global_step > 0 or self.log_first_step): - self.log_img(pl_module, batch, batch_idx, split="train") - - def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): - if not self.disabled and pl_module.global_step > 0: - self.log_img(pl_module, batch, batch_idx, split="val") - if hasattr(pl_module, 'calibrate_grad_norm'): - if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0: - self.log_gradients(trainer, pl_module, batch_idx=batch_idx) - - -class CUDACallback(Callback): - # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py - def on_train_epoch_start(self, trainer, pl_module): - # Reset the memory use counter - torch.cuda.reset_peak_memory_stats(trainer.root_gpu) - torch.cuda.synchronize(trainer.root_gpu) - self.start_time = time.time() - - def on_train_epoch_end(self, trainer, pl_module, outputs): - torch.cuda.synchronize(trainer.root_gpu) - max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20 - epoch_time = time.time() - self.start_time - - try: - max_memory = trainer.training_type_plugin.reduce(max_memory) - epoch_time = trainer.training_type_plugin.reduce(epoch_time) - - rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds") - rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB") - except AttributeError: - pass - - -if __name__ == "__main__": - # custom parser to specify config files, train, test and debug mode, - # postfix, resume. - # `--key value` arguments are interpreted as arguments to the trainer. - # `nested.key=value` arguments are interpreted as config parameters. - # configs are merged from left-to-right followed by command line parameters. 
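Editor's note: concretely, "merged from left-to-right followed by command line parameters" means later sources win. A short sketch using the same OmegaConf calls that appear further down in this script (the YAML file names and override values are hypothetical); the comment block that follows documents the schema these configs are expected to have.

from omegaconf import OmegaConf

base = OmegaConf.load("configs/base.yaml")            # hypothetical, loaded first
extra = OmegaConf.load("configs/finetune.yaml")       # hypothetical, overrides base where keys collide
cli = OmegaConf.from_dotlist(["model.base_learning_rate=1e-5",
                              "data.params.batch_size=4"])
config = OmegaConf.merge(base, extra, cli)             # command-line dotlist wins last
lightning_config = config.pop("lightning", OmegaConf.create())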
- - # model: - # base_learning_rate: float - # target: path to lightning module - # params: - # key: value - # data: - # target: main.DataModuleFromConfig - # params: - # batch_size: int - # wrap: bool - # train: - # target: path to train dataset - # params: - # key: value - # validation: - # target: path to validation dataset - # params: - # key: value - # test: - # target: path to test dataset - # params: - # key: value - # lightning: (optional, has sane defaults and can be specified on cmdline) - # trainer: - # additional arguments to trainer - # logger: - # logger to instantiate - # modelcheckpoint: - # modelcheckpoint to instantiate - # callbacks: - # callback1: - # target: importpath - # params: - # key: value - - now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") - - # add cwd for convenience and to make classes in this file available when - # running as `python main.py` - # (in particular `main.DataModuleFromConfig`) - sys.path.append(os.getcwd()) - - parser = get_parser() - parser = Trainer.add_argparse_args(parser) - - opt, unknown = parser.parse_known_args() - if opt.name and opt.resume: - raise ValueError( - "-n/--name and -r/--resume cannot be specified both." - "If you want to resume training in a new log folder, " - "use -n/--name in combination with --resume_from_checkpoint" - ) - if opt.resume: - if not os.path.exists(opt.resume): - raise ValueError("Cannot find {}".format(opt.resume)) - if os.path.isfile(opt.resume): - paths = opt.resume.split("/") - # idx = len(paths)-paths[::-1].index("logs")+1 - # logdir = "/".join(paths[:idx]) - logdir = "/".join(paths[:-2]) - ckpt = opt.resume - else: - assert os.path.isdir(opt.resume), opt.resume - logdir = opt.resume.rstrip("/") - ckpt = os.path.join(logdir, "checkpoints", "last.ckpt") - - opt.resume_from_checkpoint = ckpt - base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml"))) - opt.base = base_configs + opt.base - _tmp = logdir.split("/") - nowname = _tmp[-1] - else: - if opt.name: - name = "_" + opt.name - elif opt.base: - cfg_fname = os.path.split(opt.base[0])[-1] - cfg_name = os.path.splitext(cfg_fname)[0] - name = "_" + cfg_name - else: - name = "" - nowname = now + name + opt.postfix - logdir = os.path.join(opt.logdir, nowname) - - ckptdir = os.path.join(logdir, "checkpoints") - cfgdir = os.path.join(logdir, "configs") - seed_everything(opt.seed) - - try: - # init and save configs - configs = [OmegaConf.load(cfg) for cfg in opt.base] - cli = OmegaConf.from_dotlist(unknown) - config = OmegaConf.merge(*configs, cli) - lightning_config = config.pop("lightning", OmegaConf.create()) - # merge trainer cli with config - trainer_config = lightning_config.get("trainer", OmegaConf.create()) - # default to ddp - trainer_config["accelerator"] = "ddp" - for k in nondefault_trainer_args(opt): - trainer_config[k] = getattr(opt, k) - if not "gpus" in trainer_config: - del trainer_config["accelerator"] - cpu = True - else: - gpuinfo = trainer_config["gpus"] - print(f"Running on GPUs {gpuinfo}") - cpu = False - trainer_opt = argparse.Namespace(**trainer_config) - lightning_config.trainer = trainer_config - - # model - model = instantiate_from_config(config.model) - - # trainer and callbacks - trainer_kwargs = dict() - - # default logger configs - default_logger_cfgs = { - "wandb": { - "target": "pytorch_lightning.loggers.WandbLogger", - "params": { - "name": nowname, - "save_dir": logdir, - "offline": opt.debug, - "id": nowname, - } - }, - "testtube": { - "target": "pytorch_lightning.loggers.TestTubeLogger", - 
"params": { - "name": "testtube", - "save_dir": logdir, - } - }, - } - default_logger_cfg = default_logger_cfgs["testtube"] - if "logger" in lightning_config: - logger_cfg = lightning_config.logger - else: - logger_cfg = OmegaConf.create() - logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg) - trainer_kwargs["logger"] = instantiate_from_config(logger_cfg) - - # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to - # specify which metric is used to determine best models - default_modelckpt_cfg = { - "target": "pytorch_lightning.callbacks.ModelCheckpoint", - "params": { - "dirpath": ckptdir, - "filename": "{epoch:06}", - "verbose": True, - "save_last": True, - } - } - if hasattr(model, "monitor"): - print(f"Monitoring {model.monitor} as checkpoint metric.") - default_modelckpt_cfg["params"]["monitor"] = model.monitor - default_modelckpt_cfg["params"]["save_top_k"] = 3 - - if "modelcheckpoint" in lightning_config: - modelckpt_cfg = lightning_config.modelcheckpoint - else: - modelckpt_cfg = OmegaConf.create() - modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg) - print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}") - if version.parse(pl.__version__) < version.parse('1.4.0'): - trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg) - - # add callback which sets up log directory - default_callbacks_cfg = { - "setup_callback": { - "target": "main.SetupCallback", - "params": { - "resume": opt.resume, - "now": now, - "logdir": logdir, - "ckptdir": ckptdir, - "cfgdir": cfgdir, - "config": config, - "lightning_config": lightning_config, - } - }, - "image_logger": { - "target": "main.ImageLogger", - "params": { - "batch_frequency": 750, - "max_images": 4, - "clamp": True - } - }, - "learning_rate_logger": { - "target": "main.LearningRateMonitor", - "params": { - "logging_interval": "step", - # "log_momentum": True - } - }, - "cuda_callback": { - "target": "main.CUDACallback" - }, - } - if version.parse(pl.__version__) >= version.parse('1.4.0'): - default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg}) - - if "callbacks" in lightning_config: - callbacks_cfg = lightning_config.callbacks - else: - callbacks_cfg = OmegaConf.create() - - if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg: - print( - 'Caution: Saving checkpoints every n train steps without deleting. 
This might require some free space.') - default_metrics_over_trainsteps_ckpt_dict = { - 'metrics_over_trainsteps_checkpoint': - {"target": 'pytorch_lightning.callbacks.ModelCheckpoint', - 'params': { - "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'), - "filename": "{epoch:06}-{step:09}", - "verbose": True, - 'save_top_k': -1, - 'every_n_train_steps': 10000, - 'save_weights_only': True - } - } - } - default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict) - - callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg) - if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'): - callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint - elif 'ignore_keys_callback' in callbacks_cfg: - del callbacks_cfg['ignore_keys_callback'] - - trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg] - - trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs) - trainer.logdir = logdir ### - - # data - data = instantiate_from_config(config.data) - # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html - # calling these ourselves should not be necessary but it is. - # lightning still takes care of proper multiprocessing though - data.prepare_data() - data.setup() - print("#### Data #####") - for k in data.datasets: - print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}") - - # configure learning rate - bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate - if not cpu: - ngpu = len(lightning_config.trainer.gpus.strip(",").split(',')) - else: - ngpu = 1 - if 'accumulate_grad_batches' in lightning_config.trainer: - accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches - else: - accumulate_grad_batches = 1 - print(f"accumulate_grad_batches = {accumulate_grad_batches}") - lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches - if opt.scale_lr: - model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr - print( - "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format( - model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr)) - else: - model.learning_rate = base_lr - print("++++ NOT USING LR SCALING ++++") - print(f"Setting learning rate to {model.learning_rate:.2e}") - - - # allow checkpointing via USR1 - def melk(*args, **kwargs): - # run all checkpoint hooks - if trainer.global_rank == 0: - print("Summoning checkpoint.") - ckpt_path = os.path.join(ckptdir, "last.ckpt") - trainer.save_checkpoint(ckpt_path) - - - def divein(*args, **kwargs): - if trainer.global_rank == 0: - import pudb; - pudb.set_trace() - - - import signal - - signal.signal(signal.SIGUSR1, melk) - signal.signal(signal.SIGUSR2, divein) - - # run - if opt.train: - try: - trainer.fit(model, data) - except Exception: - melk() - raise - if not opt.no_test and not trainer.interrupted: - trainer.test(model, data) - except Exception: - if opt.debug and trainer.global_rank == 0: - try: - import pudb as debugger - except ImportError: - import pdb as debugger - debugger.post_mortem() - raise - finally: - # move newly created debug project to debug_runs - if opt.debug and not opt.resume and trainer.global_rank == 0: - dst, name = os.path.split(logdir) - dst = os.path.join(dst, "debug_runs", name) - os.makedirs(os.path.split(dst)[0], exist_ok=True) - os.rename(logdir, dst) - try: - if 
trainer.global_rank == 0: - print(trainer.profiler.summary()) - except: - pass diff --git a/spaces/Kevin676/Clone-Your-Voice/encoder/inference.py b/spaces/Kevin676/Clone-Your-Voice/encoder/inference.py deleted file mode 100644 index 43862e43e663dc5b2053c0f784dfac98cb0bacb3..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/encoder/inference.py +++ /dev/null @@ -1,178 +0,0 @@ -from encoder.params_data import * -from encoder.model import SpeakerEncoder -from encoder.audio import preprocess_wav # We want to expose this function from here -from matplotlib import cm -from encoder import audio -from pathlib import Path -import numpy as np -import torch - -_model = None # type: SpeakerEncoder -_device = None # type: torch.device - - -def load_model(weights_fpath: Path, device=None): - """ - Loads the model in memory. If this function is not explicitely called, it will be run on the - first call to embed_frames() with the default weights file. - - :param weights_fpath: the path to saved model weights. - :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The - model will be loaded and will run on this device. Outputs will however always be on the cpu. - If None, will default to your GPU if it"s available, otherwise your CPU. - """ - # TODO: I think the slow loading of the encoder might have something to do with the device it - # was saved on. Worth investigating. - global _model, _device - if device is None: - _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - elif isinstance(device, str): - _device = torch.device(device) - _model = SpeakerEncoder(_device, torch.device("cpu")) - checkpoint = torch.load(weights_fpath, _device) - _model.load_state_dict(checkpoint["model_state"]) - _model.eval() - print("Loaded encoder \"%s\" trained to step %d" % (weights_fpath.name, checkpoint["step"])) - - -def is_loaded(): - return _model is not None - - -def embed_frames_batch(frames_batch): - """ - Computes embeddings for a batch of mel spectrogram. - - :param frames_batch: a batch mel of spectrogram as a numpy array of float32 of shape - (batch_size, n_frames, n_channels) - :return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size) - """ - if _model is None: - raise Exception("Model was not loaded. Call load_model() before inference.") - - frames = torch.from_numpy(frames_batch).to(_device) - embed = _model.forward(frames).detach().cpu().numpy() - return embed - - -def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames, - min_pad_coverage=0.75, overlap=0.5): - """ - Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain - partial utterances of each. Both the waveform and the mel - spectrogram slices are returned, so as to make each partial utterance waveform correspond to - its spectrogram. This function assumes that the mel spectrogram parameters used are those - defined in params_data.py. - - The returned ranges may be indexing further than the length of the waveform. It is - recommended that you pad the waveform with zeros up to wave_slices[-1].stop. - - :param n_samples: the number of samples in the waveform - :param partial_utterance_n_frames: the number of mel spectrogram frames in each partial - utterance - :param min_pad_coverage: when reaching the last partial utterance, it may or may not have - enough frames. 
If at least of are present, - then the last partial utterance will be considered, as if we padded the audio. Otherwise, - it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial - utterance, this parameter is ignored so that the function always returns at least 1 slice. - :param overlap: by how much the partial utterance should overlap. If set to 0, the partial - utterances are entirely disjoint. - :return: the waveform slices and mel spectrogram slices as lists of array slices. Index - respectively the waveform and the mel spectrogram with these slices to obtain the partial - utterances. - """ - assert 0 <= overlap < 1 - assert 0 < min_pad_coverage <= 1 - - samples_per_frame = int((sampling_rate * mel_window_step / 1000)) - n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) - frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1) - - # Compute the slices - wav_slices, mel_slices = [], [] - steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1) - for i in range(0, steps, frame_step): - mel_range = np.array([i, i + partial_utterance_n_frames]) - wav_range = mel_range * samples_per_frame - mel_slices.append(slice(*mel_range)) - wav_slices.append(slice(*wav_range)) - - # Evaluate whether extra padding is warranted or not - last_wav_range = wav_slices[-1] - coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) - if coverage < min_pad_coverage and len(mel_slices) > 1: - mel_slices = mel_slices[:-1] - wav_slices = wav_slices[:-1] - - return wav_slices, mel_slices - - -def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs): - """ - Computes an embedding for a single utterance. - - # TODO: handle multiple wavs to benefit from batching on GPU - :param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32 - :param using_partials: if True, then the utterance is split in partial utterances of - frames and the utterance embedding is computed from their - normalized average. If False, the utterance is instead computed from feeding the entire - spectogram to the network. - :param return_partials: if True, the partial embeddings will also be returned along with the - wav slices that correspond to the partial embeddings. - :param kwargs: additional arguments to compute_partial_splits() - :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If - is True, the partial utterances as a numpy array of float32 of shape - (n_partials, model_embedding_size) and the wav partials as a list of slices will also be - returned. If is simultaneously set to False, both these values will be None - instead. 
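Editor's note: a hedged usage sketch of this module's public surface. The weight and audio paths are placeholders; `load_model`, `preprocess_wav` and `embed_utterance` are the functions defined in this file.

from pathlib import Path
from encoder import inference as encoder

encoder.load_model(Path("encoder/saved_models/pretrained.pt"))   # placeholder weights path
wav = encoder.preprocess_wav(Path("some_speaker.wav"))           # placeholder audio path
embed = encoder.embed_utterance(wav)                             # (model_embedding_size,), L2-normalised
embed, partial_embeds, wav_slices = encoder.embed_utterance(wav, return_partials=True)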
- """ - # Process the entire utterance if not using partials - if not using_partials: - frames = audio.wav_to_mel_spectrogram(wav) - embed = embed_frames_batch(frames[None, ...])[0] - if return_partials: - return embed, None, None - return embed - - # Compute where to split the utterance into partials and pad if necessary - wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs) - max_wave_length = wave_slices[-1].stop - if max_wave_length >= len(wav): - wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") - - # Split the utterance into partials - frames = audio.wav_to_mel_spectrogram(wav) - frames_batch = np.array([frames[s] for s in mel_slices]) - partial_embeds = embed_frames_batch(frames_batch) - - # Compute the utterance embedding from the partial embeddings - raw_embed = np.mean(partial_embeds, axis=0) - embed = raw_embed / np.linalg.norm(raw_embed, 2) - - if return_partials: - return embed, partial_embeds, wave_slices - return embed - - -def embed_speaker(wavs, **kwargs): - raise NotImplemented() - - -def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)): - import matplotlib.pyplot as plt - if ax is None: - ax = plt.gca() - - if shape is None: - height = int(np.sqrt(len(embed))) - shape = (height, -1) - embed = embed.reshape(shape) - - cmap = cm.get_cmap() - mappable = ax.imshow(embed, cmap=cmap) - cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04) - sm = cm.ScalarMappable(cmap=cmap) - sm.set_clim(*color_range) - - ax.set_xticks([]), ax.set_yticks([]) - ax.set_title(title) diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/dii_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/dii_head.py deleted file mode 100644 index ae9a31bbeb2a8f1da62b457363fa05031d21925a..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/dii_head.py +++ /dev/null @@ -1,422 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List - -import torch -import torch.nn as nn -from mmcv.cnn import build_activation_layer, build_norm_layer -from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention -from mmengine.config import ConfigDict -from mmengine.model import bias_init_with_prob -from torch import Tensor - -from mmdet.models.losses import accuracy -from mmdet.models.task_modules import SamplingResult -from mmdet.models.utils import multi_apply -from mmdet.registry import MODELS -from mmdet.utils import ConfigType, OptConfigType, reduce_mean -from .bbox_head import BBoxHead - - -@MODELS.register_module() -class DIIHead(BBoxHead): - r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object - Detection with Learnable Proposals `_ - - Args: - num_classes (int): Number of class in dataset. - Defaults to 80. - num_ffn_fcs (int): The number of fully-connected - layers in FFNs. Defaults to 2. - num_heads (int): The hidden dimension of FFNs. - Defaults to 8. - num_cls_fcs (int): The number of fully-connected - layers in classification subnet. Defaults to 1. - num_reg_fcs (int): The number of fully-connected - layers in regression subnet. Defaults to 3. - feedforward_channels (int): The hidden dimension - of FFNs. Defaults to 2048 - in_channels (int): Hidden_channels of MultiheadAttention. - Defaults to 256. - dropout (float): Probability of drop the channel. - Defaults to 0.0 - ffn_act_cfg (:obj:`ConfigDict` or dict): The activation config - for FFNs. 
- dynamic_conv_cfg (:obj:`ConfigDict` or dict): The convolution - config for DynamicConv. - loss_iou (:obj:`ConfigDict` or dict): The config for iou or - giou loss. - init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ - dict]): Initialization config dict. Defaults to None. - """ - - def __init__(self, - num_classes: int = 80, - num_ffn_fcs: int = 2, - num_heads: int = 8, - num_cls_fcs: int = 1, - num_reg_fcs: int = 3, - feedforward_channels: int = 2048, - in_channels: int = 256, - dropout: float = 0.0, - ffn_act_cfg: ConfigType = dict(type='ReLU', inplace=True), - dynamic_conv_cfg: ConfigType = dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=7, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0), - init_cfg: OptConfigType = None, - **kwargs) -> None: - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super().__init__( - num_classes=num_classes, - reg_decoded_bbox=True, - reg_class_agnostic=True, - init_cfg=init_cfg, - **kwargs) - self.loss_iou = MODELS.build(loss_iou) - self.in_channels = in_channels - self.fp16_enabled = False - self.attention = MultiheadAttention(in_channels, num_heads, dropout) - self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1] - - self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg) - self.instance_interactive_conv_dropout = nn.Dropout(dropout) - self.instance_interactive_conv_norm = build_norm_layer( - dict(type='LN'), in_channels)[1] - - self.ffn = FFN( - in_channels, - feedforward_channels, - num_ffn_fcs, - act_cfg=ffn_act_cfg, - dropout=dropout) - self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] - - self.cls_fcs = nn.ModuleList() - for _ in range(num_cls_fcs): - self.cls_fcs.append( - nn.Linear(in_channels, in_channels, bias=False)) - self.cls_fcs.append( - build_norm_layer(dict(type='LN'), in_channels)[1]) - self.cls_fcs.append( - build_activation_layer(dict(type='ReLU', inplace=True))) - - # over load the self.fc_cls in BBoxHead - if self.loss_cls.use_sigmoid: - self.fc_cls = nn.Linear(in_channels, self.num_classes) - else: - self.fc_cls = nn.Linear(in_channels, self.num_classes + 1) - - self.reg_fcs = nn.ModuleList() - for _ in range(num_reg_fcs): - self.reg_fcs.append( - nn.Linear(in_channels, in_channels, bias=False)) - self.reg_fcs.append( - build_norm_layer(dict(type='LN'), in_channels)[1]) - self.reg_fcs.append( - build_activation_layer(dict(type='ReLU', inplace=True))) - # over load the self.fc_cls in BBoxHead - self.fc_reg = nn.Linear(in_channels, 4) - - assert self.reg_class_agnostic, 'DIIHead only ' \ - 'suppport `reg_class_agnostic=True` ' - assert self.reg_decoded_bbox, 'DIIHead only ' \ - 'suppport `reg_decoded_bbox=True`' - - def init_weights(self) -> None: - """Use xavier initialization for all weight parameter and set - classification head bias as a specific value when use focal loss.""" - super().init_weights() - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - else: - # adopt the default initialization for - # the weight and bias of the layer norm - pass - if self.loss_cls.use_sigmoid: - bias_init = bias_init_with_prob(0.01) - nn.init.constant_(self.fc_cls.bias, bias_init) - - def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> tuple: - """Forward function of Dynamic Instance Interactive Head. 
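Editor's note: the permutes in the forward body below exist because mmcv's MultiheadAttention here consumes tensors of shape (num_queries, batch, embed_dims). A shape-only sketch with invented sizes; the attention call itself is shown as a comment since it belongs to the module instance.

import torch

N, num_proposals, C = 2, 100, 256                  # hypothetical sizes
proposal_feat = torch.randn(N, num_proposals, C)
x = proposal_feat.permute(1, 0, 2)                 # (num_proposals, N, C) for attention
# x = self.attention_norm(self.attention(x))       # self-attention across the proposal set
attn_feats = x.permute(1, 0, 2)                    # back to (N, num_proposals, C)
flat = attn_feats.reshape(-1, C)                   # (N * num_proposals, C), paired with roi_feat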
- - Args: - roi_feat (Tensor): Roi-pooling features with shape - (batch_size*num_proposals, feature_dimensions, - pooling_h , pooling_w). - proposal_feat (Tensor): Intermediate feature get from - diihead in last stage, has shape - (batch_size, num_proposals, feature_dimensions) - - Returns: - tuple[Tensor]: Usually a tuple of classification scores - and bbox prediction and a intermediate feature. - - - cls_scores (Tensor): Classification scores for - all proposals, has shape - (batch_size, num_proposals, num_classes). - - bbox_preds (Tensor): Box energies / deltas for - all proposals, has shape - (batch_size, num_proposals, 4). - - obj_feat (Tensor): Object feature before classification - and regression subnet, has shape - (batch_size, num_proposal, feature_dimensions). - - attn_feats (Tensor): Intermediate feature. - """ - N, num_proposals = proposal_feat.shape[:2] - - # Self attention - proposal_feat = proposal_feat.permute(1, 0, 2) - proposal_feat = self.attention_norm(self.attention(proposal_feat)) - attn_feats = proposal_feat.permute(1, 0, 2) - - # instance interactive - proposal_feat = attn_feats.reshape(-1, self.in_channels) - proposal_feat_iic = self.instance_interactive_conv( - proposal_feat, roi_feat) - proposal_feat = proposal_feat + self.instance_interactive_conv_dropout( - proposal_feat_iic) - obj_feat = self.instance_interactive_conv_norm(proposal_feat) - - # FFN - obj_feat = self.ffn_norm(self.ffn(obj_feat)) - - cls_feat = obj_feat - reg_feat = obj_feat - - for cls_layer in self.cls_fcs: - cls_feat = cls_layer(cls_feat) - for reg_layer in self.reg_fcs: - reg_feat = reg_layer(reg_feat) - - cls_score = self.fc_cls(cls_feat).view( - N, num_proposals, self.num_classes - if self.loss_cls.use_sigmoid else self.num_classes + 1) - bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4) - - return cls_score, bbox_delta, obj_feat.view( - N, num_proposals, self.in_channels), attn_feats - - def loss_and_target(self, - cls_score: Tensor, - bbox_pred: Tensor, - sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigType, - imgs_whwh: Tensor, - concat: bool = True, - reduction_override: str = None) -> dict: - """Calculate the loss based on the features extracted by the DIIHead. - - Args: - cls_score (Tensor): Classification prediction - results of all class, has shape - (batch_size * num_proposals_single_image, num_classes) - bbox_pred (Tensor): Regression prediction results, has shape - (batch_size * num_proposals_single_image, 4), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\ - shape (batch_size, num_proposals, 4), the last - dimension means - [img_width,img_height, img_width, img_height]. - concat (bool): Whether to concatenate the results of all - the images in a single batch. Defaults to True. - reduction_override (str, optional): The reduction - method used to override the original reduction - method of the loss. Options are "none", - "mean" and "sum". Defaults to None. - - Returns: - dict: A dictionary of loss and targets components. - The targets are only used for cascade rcnn. 
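Editor's note: a worked illustration (numbers invented) of the box normalisation used in the loss body below. The L1 regression term is computed on boxes divided by `[img_w, img_h, img_w, img_h]` so it is scale-invariant, while the IoU/GIoU term is evaluated on absolute coordinates.

import torch

pos_bbox_pred = torch.tensor([[100., 50., 300., 400.]])   # decoded [tl_x, tl_y, br_x, br_y]
bbox_target   = torch.tensor([[110., 60., 310., 390.]])
imgs_whwh     = torch.tensor([[640., 480., 640., 480.]])
l1_term = torch.abs(pos_bbox_pred / imgs_whwh - bbox_target / imgs_whwh).mean()
# The GIoU term would be computed directly on pos_bbox_pred vs. bbox_target.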
- """ - cls_reg_targets = self.get_targets( - sampling_results=sampling_results, - rcnn_train_cfg=rcnn_train_cfg, - concat=concat) - (labels, label_weights, bbox_targets, bbox_weights) = cls_reg_targets - - losses = dict() - bg_class_ind = self.num_classes - # note in spare rcnn num_gt == num_pos - pos_inds = (labels >= 0) & (labels < bg_class_ind) - num_pos = pos_inds.sum().float() - avg_factor = reduce_mean(num_pos) - if cls_score is not None: - if cls_score.numel() > 0: - losses['loss_cls'] = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - losses['pos_acc'] = accuracy(cls_score[pos_inds], - labels[pos_inds]) - if bbox_pred is not None: - # 0~self.num_classes-1 are FG, self.num_classes is BG - # do not perform bounding box regression for BG anymore. - if pos_inds.any(): - pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0), - 4)[pos_inds.type(torch.bool)] - imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0), - 4)[pos_inds.type(torch.bool)] - losses['loss_bbox'] = self.loss_bbox( - pos_bbox_pred / imgs_whwh, - bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh, - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=avg_factor) - losses['loss_iou'] = self.loss_iou( - pos_bbox_pred, - bbox_targets[pos_inds.type(torch.bool)], - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=avg_factor) - else: - losses['loss_bbox'] = bbox_pred.sum() * 0 - losses['loss_iou'] = bbox_pred.sum() * 0 - return dict(loss_bbox=losses, bbox_targets=cls_reg_targets) - - def _get_targets_single(self, pos_inds: Tensor, neg_inds: Tensor, - pos_priors: Tensor, neg_priors: Tensor, - pos_gt_bboxes: Tensor, pos_gt_labels: Tensor, - cfg: ConfigDict) -> tuple: - """Calculate the ground truth for proposals in the single image - according to the sampling results. - - Almost the same as the implementation in `bbox_head`, - we add pos_inds and neg_inds to select positive and - negative samples instead of selecting the first num_pos - as positive samples. - - Args: - pos_inds (Tensor): The length is equal to the - positive sample numbers contain all index - of the positive sample in the origin proposal set. - neg_inds (Tensor): The length is equal to the - negative sample numbers contain all index - of the negative sample in the origin proposal set. - pos_priors (Tensor): Contains all the positive boxes, - has shape (num_pos, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - neg_priors (Tensor): Contains all the negative boxes, - has shape (num_neg, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_bboxes (Tensor): Contains gt_boxes for - all positive samples, has shape (num_pos, 4), - the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_labels (Tensor): Contains gt_labels for - all positive samples, has shape (num_pos, ). - cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following Tensors: - - - labels(Tensor): Gt_labels for all proposals, has - shape (num_proposals,). - - label_weights(Tensor): Labels_weights for all proposals, has - shape (num_proposals,). - - bbox_targets(Tensor):Regression target for all proposals, has - shape (num_proposals, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights(Tensor):Regression weights for all proposals, - has shape (num_proposals, 4). 
- """ - num_pos = pos_priors.size(0) - num_neg = neg_priors.size(0) - num_samples = num_pos + num_neg - - # original implementation uses new_zeros since BG are set to be 0 - # now use empty & fill because BG cat_id = num_classes, - # FG cat_id = [0, num_classes-1] - labels = pos_priors.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - label_weights = pos_priors.new_zeros(num_samples) - bbox_targets = pos_priors.new_zeros(num_samples, 4) - bbox_weights = pos_priors.new_zeros(num_samples, 4) - if num_pos > 0: - labels[pos_inds] = pos_gt_labels - pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight - label_weights[pos_inds] = pos_weight - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - pos_priors, pos_gt_bboxes) - else: - pos_bbox_targets = pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1 - if num_neg > 0: - label_weights[neg_inds] = 1.0 - - return labels, label_weights, bbox_targets, bbox_weights - - def get_targets(self, - sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigDict, - concat: bool = True) -> tuple: - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Almost the same as the implementation in bbox_head, we passed - additional parameters pos_inds_list and neg_inds_list to - `_get_targets_single` function. - - Args: - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following list of Tensors: - - - labels (list[Tensor],Tensor): Gt_labels for all - proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise just - a single tensor has shape (num_all_proposals,). - - label_weights (list[Tensor]): Labels_weights for - all proposals in a batch, each tensor in list has shape - (num_proposals,) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals,). - - bbox_targets (list[Tensor],Tensor): Regression target - for all proposals in a batch, each tensor in list has - shape (num_proposals, 4) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals, 4), - the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights (list[tensor],Tensor): Regression weights for - all proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals, 4). 
- """ - pos_inds_list = [res.pos_inds for res in sampling_results] - neg_inds_list = [res.neg_inds for res in sampling_results] - pos_priors_list = [res.pos_priors for res in sampling_results] - neg_priors_list = [res.neg_priors for res in sampling_results] - pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] - labels, label_weights, bbox_targets, bbox_weights = multi_apply( - self._get_targets_single, - pos_inds_list, - neg_inds_list, - pos_priors_list, - neg_priors_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bbox_targets = torch.cat(bbox_targets, 0) - bbox_weights = torch.cat(bbox_weights, 0) - return labels, label_weights, bbox_targets, bbox_weights diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py deleted file mode 100644 index 1c888f1e78d60433bf0333c642cc2f89e6d95614..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py +++ /dev/null @@ -1,622 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmengine.config import ConfigDict -from mmengine.structures import InstanceData -from torch import Tensor, nn - -from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead -from mmdet.models.task_modules.samplers import SamplingResult -from mmdet.models.utils import empty_instances -from mmdet.registry import MODELS -from mmdet.structures.bbox import bbox_overlaps - - -@MODELS.register_module() -class MultiInstanceBBoxHead(BBoxHead): - r"""Bbox head used in CrowdDet. - - .. code-block:: none - - /-> cls convs_1 -> cls fcs_1 -> cls_1 - |-- - | \-> reg convs_1 -> reg fcs_1 -> reg_1 - | - | /-> cls convs_2 -> cls fcs_2 -> cls_2 - shared convs -> shared fcs |-- - | \-> reg convs_2 -> reg fcs_2 -> reg_2 - | - | ... - | - | /-> cls convs_k -> cls fcs_k -> cls_k - |-- - \-> reg convs_k -> reg fcs_k -> reg_k - - - Args: - num_instance (int): The number of branches after shared fcs. - Defaults to 2. - with_refine (bool): Whether to use refine module. Defaults to False. - num_shared_convs (int): The number of shared convs. Defaults to 0. - num_shared_fcs (int): The number of shared fcs. Defaults to 2. - num_cls_convs (int): The number of cls convs. Defaults to 0. - num_cls_fcs (int): The number of cls fcs. Defaults to 0. - num_reg_convs (int): The number of reg convs. Defaults to 0. - num_reg_fcs (int): The number of reg fcs. Defaults to 0. - conv_out_channels (int): The number of conv out channels. - Defaults to 256. - fc_out_channels (int): The number of fc out channels. Defaults to 1024. - init_cfg (dict or list[dict], optional): Initialization config dict. - Defaults to None. 
- """ # noqa: W605 - - def __init__(self, - num_instance: int = 2, - with_refine: bool = False, - num_shared_convs: int = 0, - num_shared_fcs: int = 2, - num_cls_convs: int = 0, - num_cls_fcs: int = 0, - num_reg_convs: int = 0, - num_reg_fcs: int = 0, - conv_out_channels: int = 256, - fc_out_channels: int = 1024, - init_cfg: Optional[Union[dict, ConfigDict]] = None, - *args, - **kwargs) -> None: - super().__init__(*args, init_cfg=init_cfg, **kwargs) - assert (num_shared_convs + num_shared_fcs + num_cls_convs + - num_cls_fcs + num_reg_convs + num_reg_fcs > 0) - assert num_instance == 2, 'Currently only 2 instances are supported' - if num_cls_convs > 0 or num_reg_convs > 0: - assert num_shared_fcs == 0 - if not self.with_cls: - assert num_cls_convs == 0 and num_cls_fcs == 0 - if not self.with_reg: - assert num_reg_convs == 0 and num_reg_fcs == 0 - self.num_instance = num_instance - self.num_shared_convs = num_shared_convs - self.num_shared_fcs = num_shared_fcs - self.num_cls_convs = num_cls_convs - self.num_cls_fcs = num_cls_fcs - self.num_reg_convs = num_reg_convs - self.num_reg_fcs = num_reg_fcs - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.with_refine = with_refine - - # add shared convs and fcs - self.shared_convs, self.shared_fcs, last_layer_dim = \ - self._add_conv_fc_branch( - self.num_shared_convs, self.num_shared_fcs, self.in_channels, - True) - self.shared_out_channels = last_layer_dim - self.relu = nn.ReLU(inplace=True) - - if self.with_refine: - refine_model_cfg = { - 'type': 'Linear', - 'in_features': self.shared_out_channels + 20, - 'out_features': self.shared_out_channels - } - self.shared_fcs_ref = MODELS.build(refine_model_cfg) - self.fc_cls_ref = nn.ModuleList() - self.fc_reg_ref = nn.ModuleList() - - self.cls_convs = nn.ModuleList() - self.cls_fcs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - self.reg_fcs = nn.ModuleList() - self.cls_last_dim = list() - self.reg_last_dim = list() - self.fc_cls = nn.ModuleList() - self.fc_reg = nn.ModuleList() - for k in range(self.num_instance): - # add cls specific branch - cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch( - self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) - self.cls_convs.append(cls_convs) - self.cls_fcs.append(cls_fcs) - self.cls_last_dim.append(cls_last_dim) - - # add reg specific branch - reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch( - self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) - self.reg_convs.append(reg_convs) - self.reg_fcs.append(reg_fcs) - self.reg_last_dim.append(reg_last_dim) - - if self.num_shared_fcs == 0 and not self.with_avg_pool: - if self.num_cls_fcs == 0: - self.cls_last_dim *= self.roi_feat_area - if self.num_reg_fcs == 0: - self.reg_last_dim *= self.roi_feat_area - - if self.with_cls: - if self.custom_cls_channels: - cls_channels = self.loss_cls.get_cls_channels( - self.num_classes) - else: - cls_channels = self.num_classes + 1 - cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy - cls_predictor_cfg_.update( - in_features=self.cls_last_dim[k], - out_features=cls_channels) - self.fc_cls.append(MODELS.build(cls_predictor_cfg_)) - if self.with_refine: - self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_)) - - if self.with_reg: - out_dim_reg = (4 if self.reg_class_agnostic else 4 * - self.num_classes) - reg_predictor_cfg_ = self.reg_predictor_cfg.copy() - reg_predictor_cfg_.update( - in_features=self.reg_last_dim[k], out_features=out_dim_reg) - 
self.fc_reg.append(MODELS.build(reg_predictor_cfg_)) - if self.with_refine: - self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_)) - - if init_cfg is None: - # when init_cfg is None, - # It has been set to - # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], - # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] - # after `super(ConvFCBBoxHead, self).__init__()` - # we only need to append additional configuration - # for `shared_fcs`, `cls_fcs` and `reg_fcs` - self.init_cfg += [ - dict( - type='Xavier', - distribution='uniform', - override=[ - dict(name='shared_fcs'), - dict(name='cls_fcs'), - dict(name='reg_fcs') - ]) - ] - - def _add_conv_fc_branch(self, - num_branch_convs: int, - num_branch_fcs: int, - in_channels: int, - is_shared: bool = False) -> tuple: - """Add shared or separable branch. - - convs -> avg pool (optional) -> fcs - """ - last_layer_dim = in_channels - # add branch specific conv layers - branch_convs = nn.ModuleList() - if num_branch_convs > 0: - for i in range(num_branch_convs): - conv_in_channels = ( - last_layer_dim if i == 0 else self.conv_out_channels) - branch_convs.append( - ConvModule( - conv_in_channels, self.conv_out_channels, 3, - padding=1)) - last_layer_dim = self.conv_out_channels - # add branch specific fc layers - branch_fcs = nn.ModuleList() - if num_branch_fcs > 0: - # for shared branch, only consider self.with_avg_pool - # for separated branches, also consider self.num_shared_fcs - if (is_shared - or self.num_shared_fcs == 0) and not self.with_avg_pool: - last_layer_dim *= self.roi_feat_area - for i in range(num_branch_fcs): - fc_in_channels = ( - last_layer_dim if i == 0 else self.fc_out_channels) - branch_fcs.append( - nn.Linear(fc_in_channels, self.fc_out_channels)) - last_layer_dim = self.fc_out_channels - return branch_convs, branch_fcs, last_layer_dim - - def forward(self, x: Tuple[Tensor]) -> tuple: - """Forward features from the upstream network. - - Args: - x (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: A tuple of classification scores and bbox prediction. - - - cls_score (Tensor): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. - - bbox_pred (Tensor): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. - - cls_score_ref (Tensor): The cls_score after refine model. - - bbox_pred_ref (Tensor): The bbox_pred after refine model. 
- """ - # shared part - if self.num_shared_convs > 0: - for conv in self.shared_convs: - x = conv(x) - - if self.num_shared_fcs > 0: - if self.with_avg_pool: - x = self.avg_pool(x) - - x = x.flatten(1) - for fc in self.shared_fcs: - x = self.relu(fc(x)) - - x_cls = x - x_reg = x - # separate branches - cls_score = list() - bbox_pred = list() - for k in range(self.num_instance): - for conv in self.cls_convs[k]: - x_cls = conv(x_cls) - if x_cls.dim() > 2: - if self.with_avg_pool: - x_cls = self.avg_pool(x_cls) - x_cls = x_cls.flatten(1) - for fc in self.cls_fcs[k]: - x_cls = self.relu(fc(x_cls)) - - for conv in self.reg_convs[k]: - x_reg = conv(x_reg) - if x_reg.dim() > 2: - if self.with_avg_pool: - x_reg = self.avg_pool(x_reg) - x_reg = x_reg.flatten(1) - for fc in self.reg_fcs[k]: - x_reg = self.relu(fc(x_reg)) - - cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None) - bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None) - - if self.with_refine: - x_ref = x - cls_score_ref = list() - bbox_pred_ref = list() - for k in range(self.num_instance): - feat_ref = cls_score[k].softmax(dim=-1) - feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]), - dim=1).repeat(1, 4) - feat_ref = torch.cat((x_ref, feat_ref), dim=1) - feat_ref = F.relu_(self.shared_fcs_ref(feat_ref)) - - cls_score_ref.append(self.fc_cls_ref[k](feat_ref)) - bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref)) - - cls_score = torch.cat(cls_score, dim=1) - bbox_pred = torch.cat(bbox_pred, dim=1) - cls_score_ref = torch.cat(cls_score_ref, dim=1) - bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1) - return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref - - cls_score = torch.cat(cls_score, dim=1) - bbox_pred = torch.cat(bbox_pred, dim=1) - - return cls_score, bbox_pred - - def get_targets(self, - sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigDict, - concat: bool = True) -> tuple: - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Almost the same as the implementation in bbox_head, we passed - additional parameters pos_inds_list and neg_inds_list to - `_get_targets_single` function. - - Args: - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following list of Tensors: - - - labels (list[Tensor],Tensor): Gt_labels for all proposals in a - batch, each tensor in list has shape (num_proposals,) when - `concat=False`, otherwise just a single tensor has shape - (num_all_proposals,). - - label_weights (list[Tensor]): Labels_weights for - all proposals in a batch, each tensor in list has shape - (num_proposals,) when `concat=False`, otherwise just a single - tensor has shape (num_all_proposals,). - - bbox_targets (list[Tensor],Tensor): Regression target for all - proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a single - tensor has shape (num_all_proposals, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights (list[tensor],Tensor): Regression weights for - all proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals, 4). 
- """ - labels = [] - bbox_targets = [] - bbox_weights = [] - label_weights = [] - for i in range(len(sampling_results)): - sample_bboxes = torch.cat([ - sampling_results[i].pos_gt_bboxes, - sampling_results[i].neg_gt_bboxes - ]) - sample_priors = sampling_results[i].priors - sample_priors = sample_priors.repeat(1, self.num_instance).reshape( - -1, 4) - sample_bboxes = sample_bboxes.reshape(-1, 4) - - if not self.reg_decoded_bbox: - _bbox_targets = self.bbox_coder.encode(sample_priors, - sample_bboxes) - else: - _bbox_targets = sample_priors - _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4) - _bbox_weights = torch.ones(_bbox_targets.shape) - _labels = torch.cat([ - sampling_results[i].pos_gt_labels, - sampling_results[i].neg_gt_labels - ]) - _labels_weights = torch.ones(_labels.shape) - - bbox_targets.append(_bbox_targets) - bbox_weights.append(_bbox_weights) - labels.append(_labels) - label_weights.append(_labels_weights) - - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bbox_targets = torch.cat(bbox_targets, 0) - bbox_weights = torch.cat(bbox_weights, 0) - return labels, label_weights, bbox_targets, bbox_weights - - def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, - labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, - bbox_weights: Tensor, **kwargs) -> dict: - """Calculate the loss based on the network predictions and targets. - - Args: - cls_score (Tensor): Classification prediction results of all class, - has shape (batch_size * num_proposals_single_image, - (num_classes + 1) * k), k represents the number of prediction - boxes generated by each proposal box. - bbox_pred (Tensor): Regression prediction results, has shape - (batch_size * num_proposals_single_image, 4 * k), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - rois (Tensor): RoIs with the shape - (batch_size * num_proposals_single_image, 5) where the first - column indicates batch id of each RoI. - labels (Tensor): Gt_labels for all proposals in a batch, has - shape (batch_size * num_proposals_single_image, k). - label_weights (Tensor): Labels_weights for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, k). - bbox_targets (Tensor): Regression target for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, - 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, - br_y]. - bbox_weights (Tensor): Regression weights for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, - 4 * k). - - Returns: - dict: A dictionary of loss. - """ - losses = dict() - if bbox_pred.numel(): - loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2], - bbox_pred[:, 4:8], cls_score[:, 2:4], - bbox_targets, labels) - loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4], - bbox_pred[:, 0:4], cls_score[:, 0:2], - bbox_targets, labels) - loss = torch.cat([loss_0, loss_1], dim=1) - _, min_indices = loss.min(dim=1) - loss_emd = loss[torch.arange(loss.shape[0]), min_indices] - loss_emd = loss_emd.mean() - else: - loss_emd = bbox_pred.sum() - losses['loss_rcnn_emd'] = loss_emd - return losses - - def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor, - bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor, - labels: Tensor) -> Tensor: - """Calculate the emd loss. 
- - Note: - This implementation is modified from https://github.com/Purkialo/ - CrowdDet/blob/master/lib/det_oprs/loss_opr.py - - Args: - bbox_pred_0 (Tensor): Part of regression prediction results, has - shape (batch_size * num_proposals_single_image, 4), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - cls_score_0 (Tensor): Part of classification prediction results, - has shape (batch_size * num_proposals_single_image, - (num_classes + 1)), where 1 represents the background. - bbox_pred_1 (Tensor): The other part of regression prediction - results, has shape (batch_size*num_proposals_single_image, 4). - cls_score_1 (Tensor):The other part of classification prediction - results, has shape (batch_size * num_proposals_single_image, - (num_classes + 1)). - targets (Tensor):Regression target for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, - 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, - br_y], k represents the number of prediction boxes generated - by each proposal box. - labels (Tensor): Gt_labels for all proposals in a batch, has - shape (batch_size * num_proposals_single_image, k). - - Returns: - torch.Tensor: The calculated loss. - """ - - bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1], - dim=1).reshape(-1, bbox_pred_0.shape[-1]) - cls_score = torch.cat([cls_score_0, cls_score_1], - dim=1).reshape(-1, cls_score_0.shape[-1]) - targets = targets.reshape(-1, 4) - labels = labels.long().flatten() - - # masks - valid_masks = labels >= 0 - fg_masks = labels > 0 - - # multiple class - bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4) - fg_gt_classes = labels[fg_masks] - bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :] - - # loss for regression - loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks]) - loss_bbox = loss_bbox.sum(dim=1) - - # loss for classification - labels = labels * valid_masks - loss_cls = self.loss_cls(cls_score, labels) - - loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox - loss = loss_cls.reshape(-1, 2).sum(dim=1) - return loss.reshape(-1, 1) - - def _predict_by_feat_single( - self, - roi: Tensor, - cls_score: Tensor, - bbox_pred: Tensor, - img_meta: dict, - rescale: bool = False, - rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: - """Transform a single image's features extracted from the head into - bbox results. - - Args: - roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). - last dimension 5 arrange as (batch_index, x1, y1, x2, y2). - cls_score (Tensor): Box scores, has shape - (num_boxes, num_classes + 1). - bbox_pred (Tensor): Box energies / deltas. has shape - (num_boxes, num_classes * 4). - img_meta (dict): image information. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. - Defaults to None - - Returns: - :obj:`InstanceData`: Detection results of each image. - Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). 
- """ - - cls_score = cls_score.reshape(-1, self.num_classes + 1) - bbox_pred = bbox_pred.reshape(-1, 4) - roi = roi.repeat_interleave(self.num_instance, dim=0) - - results = InstanceData() - if roi.shape[0] == 0: - return empty_instances([img_meta], - roi.device, - task_type='bbox', - instance_results=[results])[0] - - scores = cls_score.softmax(dim=-1) if cls_score is not None else None - img_shape = img_meta['img_shape'] - bboxes = self.bbox_coder.decode( - roi[..., 1:], bbox_pred, max_shape=img_shape) - - if rescale and bboxes.size(0) > 0: - assert img_meta.get('scale_factor') is not None - scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( - (1, 2)) - bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( - bboxes.size()[0], -1) - - if rcnn_test_cfg is None: - # This means that it is aug test. - # It needs to return the raw results without nms. - results.bboxes = bboxes - results.scores = scores - else: - roi_idx = np.tile( - np.arange(bboxes.shape[0] / self.num_instance)[:, None], - (1, self.num_instance)).reshape(-1, 1)[:, 0] - roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( - -1, 1) - bboxes = torch.cat([bboxes, roi_idx], dim=1) - det_bboxes, det_scores = self.set_nms( - bboxes, scores[:, 1], rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) - - results.bboxes = det_bboxes[:, :-1] - results.scores = det_scores - results.labels = torch.zeros_like(det_scores) - - return results - - @staticmethod - def set_nms(bboxes: Tensor, - scores: Tensor, - score_thr: float, - iou_threshold: float, - max_num: int = -1) -> Tuple[Tensor, Tensor]: - """NMS for multi-instance prediction. Please refer to - https://github.com/Purkialo/CrowdDet for more details. - - Args: - bboxes (Tensor): predict bboxes. - scores (Tensor): The score of each predict bbox. - score_thr (float): bbox threshold, bboxes with scores lower than it - will not be considered. - iou_threshold (float): IoU threshold to be considered as - conflicted. - max_num (int, optional): if there are more than max_num bboxes - after NMS, only top max_num will be kept. Default to -1. - - Returns: - Tuple[Tensor, Tensor]: (bboxes, scores). 
- """ - - bboxes = bboxes[scores > score_thr] - scores = scores[scores > score_thr] - - ordered_scores, order = scores.sort(descending=True) - ordered_bboxes = bboxes[order] - roi_idx = ordered_bboxes[:, -1] - - keep = torch.ones(len(ordered_bboxes)) == 1 - ruler = torch.arange(len(ordered_bboxes)) - while ruler.shape[0] > 0: - basement = ruler[0] - ruler = ruler[1:] - idx = roi_idx[basement] - # calculate the body overlap - basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) - ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4) - overlap = bbox_overlaps(basement_bbox, ruler_bbox) - indices = torch.where(overlap > iou_threshold)[1] - loc = torch.where(roi_idx[ruler][indices] == idx) - # the mask won't change in the step - mask = keep[ruler[indices][loc]] - keep[ruler[indices]] = False - keep[ruler[indices][loc][mask]] = True - ruler[~keep[ruler]] = -1 - ruler = ruler[ruler > 0] - - keep = keep[order.sort()[1]] - return bboxes[keep][:max_num, :], scores[keep][:max_num] diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/mask_heads/grid_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/mask_heads/grid_head.py deleted file mode 100644 index d9514ae7bcfc1b7d5613fa0107e9bd087e13dd46..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/mask_heads/grid_head.py +++ /dev/null @@ -1,490 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict, List, Tuple - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmengine.config import ConfigDict -from mmengine.model import BaseModule -from mmengine.structures import InstanceData -from torch import Tensor - -from mmdet.models.task_modules.samplers import SamplingResult -from mmdet.registry import MODELS -from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType - - -@MODELS.register_module() -class GridHead(BaseModule): - """Implementation of `Grid Head `_ - - Args: - grid_points (int): The number of grid points. Defaults to 9. - num_convs (int): The number of convolution layers. Defaults to 8. - roi_feat_size (int): RoI feature size. Default to 14. - in_channels (int): The channel number of inputs features. - Defaults to 256. - conv_kernel_size (int): The kernel size of convolution layers. - Defaults to 3. - point_feat_channels (int): The number of channels of each point - features. Defaults to 64. - class_agnostic (bool): Whether use class agnostic classification. - If so, the output channels of logits will be 1. Defaults to False. - loss_grid (:obj:`ConfigDict` or dict): Config of grid loss. - conv_cfg (:obj:`ConfigDict` or dict, optional) dictionary to - construct and config conv layer. - norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and - config norm layer. - init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ - dict]): Initialization config dict. 
- """ - - def __init__( - self, - grid_points: int = 9, - num_convs: int = 8, - roi_feat_size: int = 14, - in_channels: int = 256, - conv_kernel_size: int = 3, - point_feat_channels: int = 64, - deconv_kernel_size: int = 4, - class_agnostic: bool = False, - loss_grid: ConfigType = dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15), - conv_cfg: OptConfigType = None, - norm_cfg: ConfigType = dict(type='GN', num_groups=36), - init_cfg: MultiConfig = [ - dict(type='Kaiming', layer=['Conv2d', 'Linear']), - dict( - type='Normal', - layer='ConvTranspose2d', - std=0.001, - override=dict( - type='Normal', - name='deconv2', - std=0.001, - bias=-np.log(0.99 / 0.01))) - ] - ) -> None: - super().__init__(init_cfg=init_cfg) - self.grid_points = grid_points - self.num_convs = num_convs - self.roi_feat_size = roi_feat_size - self.in_channels = in_channels - self.conv_kernel_size = conv_kernel_size - self.point_feat_channels = point_feat_channels - self.conv_out_channels = self.point_feat_channels * self.grid_points - self.class_agnostic = class_agnostic - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': - assert self.conv_out_channels % norm_cfg['num_groups'] == 0 - - assert self.grid_points >= 4 - self.grid_size = int(np.sqrt(self.grid_points)) - if self.grid_size * self.grid_size != self.grid_points: - raise ValueError('grid_points must be a square number') - - # the predicted heatmap is half of whole_map_size - if not isinstance(self.roi_feat_size, int): - raise ValueError('Only square RoIs are supporeted in Grid R-CNN') - self.whole_map_size = self.roi_feat_size * 4 - - # compute point-wise sub-regions - self.sub_regions = self.calc_sub_regions() - - self.convs = [] - for i in range(self.num_convs): - in_channels = ( - self.in_channels if i == 0 else self.conv_out_channels) - stride = 2 if i == 0 else 1 - padding = (self.conv_kernel_size - 1) // 2 - self.convs.append( - ConvModule( - in_channels, - self.conv_out_channels, - self.conv_kernel_size, - stride=stride, - padding=padding, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=True)) - self.convs = nn.Sequential(*self.convs) - - self.deconv1 = nn.ConvTranspose2d( - self.conv_out_channels, - self.conv_out_channels, - kernel_size=deconv_kernel_size, - stride=2, - padding=(deconv_kernel_size - 2) // 2, - groups=grid_points) - self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) - self.deconv2 = nn.ConvTranspose2d( - self.conv_out_channels, - grid_points, - kernel_size=deconv_kernel_size, - stride=2, - padding=(deconv_kernel_size - 2) // 2, - groups=grid_points) - - # find the 4-neighbor of each grid point - self.neighbor_points = [] - grid_size = self.grid_size - for i in range(grid_size): # i-th column - for j in range(grid_size): # j-th row - neighbors = [] - if i > 0: # left: (i - 1, j) - neighbors.append((i - 1) * grid_size + j) - if j > 0: # up: (i, j - 1) - neighbors.append(i * grid_size + j - 1) - if j < grid_size - 1: # down: (i, j + 1) - neighbors.append(i * grid_size + j + 1) - if i < grid_size - 1: # right: (i + 1, j) - neighbors.append((i + 1) * grid_size + j) - self.neighbor_points.append(tuple(neighbors)) - # total edges in the grid - self.num_edges = sum([len(p) for p in self.neighbor_points]) - - self.forder_trans = nn.ModuleList() # first-order feature transition - self.sorder_trans = nn.ModuleList() # second-order feature transition - for neighbors in self.neighbor_points: - fo_trans = nn.ModuleList() - so_trans = nn.ModuleList() - 
for _ in range(len(neighbors)): - # each transition module consists of a 5x5 depth-wise conv and - # 1x1 conv. - fo_trans.append( - nn.Sequential( - nn.Conv2d( - self.point_feat_channels, - self.point_feat_channels, - 5, - stride=1, - padding=2, - groups=self.point_feat_channels), - nn.Conv2d(self.point_feat_channels, - self.point_feat_channels, 1))) - so_trans.append( - nn.Sequential( - nn.Conv2d( - self.point_feat_channels, - self.point_feat_channels, - 5, - 1, - 2, - groups=self.point_feat_channels), - nn.Conv2d(self.point_feat_channels, - self.point_feat_channels, 1))) - self.forder_trans.append(fo_trans) - self.sorder_trans.append(so_trans) - - self.loss_grid = MODELS.build(loss_grid) - - def forward(self, x: Tensor) -> Dict[str, Tensor]: - """forward function of ``GridHead``. - - Args: - x (Tensor): RoI features, has shape - (num_rois, num_channels, roi_feat_size, roi_feat_size). - - Returns: - Dict[str, Tensor]: Return a dict including fused and unfused - heatmap. - """ - assert x.shape[-1] == x.shape[-2] == self.roi_feat_size - # RoI feature transformation, downsample 2x - x = self.convs(x) - - c = self.point_feat_channels - # first-order fusion - x_fo = [None for _ in range(self.grid_points)] - for i, points in enumerate(self.neighbor_points): - x_fo[i] = x[:, i * c:(i + 1) * c] - for j, point_idx in enumerate(points): - x_fo[i] = x_fo[i] + self.forder_trans[i][j]( - x[:, point_idx * c:(point_idx + 1) * c]) - - # second-order fusion - x_so = [None for _ in range(self.grid_points)] - for i, points in enumerate(self.neighbor_points): - x_so[i] = x[:, i * c:(i + 1) * c] - for j, point_idx in enumerate(points): - x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) - - # predicted heatmap with fused features - x2 = torch.cat(x_so, dim=1) - x2 = self.deconv1(x2) - x2 = F.relu(self.norm1(x2), inplace=True) - heatmap = self.deconv2(x2) - - # predicted heatmap with original features (applicable during training) - if self.training: - x1 = x - x1 = self.deconv1(x1) - x1 = F.relu(self.norm1(x1), inplace=True) - heatmap_unfused = self.deconv2(x1) - else: - heatmap_unfused = heatmap - - return dict(fused=heatmap, unfused=heatmap_unfused) - - def calc_sub_regions(self) -> List[Tuple[float]]: - """Compute point specific representation regions. - - See `Grid R-CNN Plus `_ for details. - """ - # to make it consistent with the original implementation, half_size - # is computed as 2 * quarter_size, which is smaller - half_size = self.whole_map_size // 4 * 2 - sub_regions = [] - for i in range(self.grid_points): - x_idx = i // self.grid_size - y_idx = i % self.grid_size - if x_idx == 0: - sub_x1 = 0 - elif x_idx == self.grid_size - 1: - sub_x1 = half_size - else: - ratio = x_idx / (self.grid_size - 1) - 0.25 - sub_x1 = max(int(ratio * self.whole_map_size), 0) - - if y_idx == 0: - sub_y1 = 0 - elif y_idx == self.grid_size - 1: - sub_y1 = half_size - else: - ratio = y_idx / (self.grid_size - 1) - 0.25 - sub_y1 = max(int(ratio * self.whole_map_size), 0) - sub_regions.append( - (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) - return sub_regions - - def get_targets(self, sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigDict) -> Tensor: - """Calculate the ground truth for all samples in a batch according to - the sampling_results.". - - Args: - sampling_results (List[:obj:`SamplingResult`]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (:obj:`ConfigDict`): `train_cfg` of RCNN. - - Returns: - Tensor: Grid heatmap targets. 
- """ - # mix all samples (across images) together. - pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], - dim=0).cpu() - pos_gt_bboxes = torch.cat( - [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() - assert pos_bboxes.shape == pos_gt_bboxes.shape - - # expand pos_bboxes to 2x of original size - x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 - y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 - x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 - y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 - pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) - pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) - - num_rois = pos_bboxes.shape[0] - map_size = self.whole_map_size - # this is not the final target shape - targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), - dtype=torch.float) - - # pre-compute interpolation factors for all grid points. - # the first item is the factor of x-dim, and the second is y-dim. - # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) - factors = [] - for j in range(self.grid_points): - x_idx = j // self.grid_size - y_idx = j % self.grid_size - factors.append((1 - x_idx / (self.grid_size - 1), - 1 - y_idx / (self.grid_size - 1))) - - radius = rcnn_train_cfg.pos_radius - radius2 = radius**2 - for i in range(num_rois): - # ignore small bboxes - if (pos_bbox_ws[i] <= self.grid_size - or pos_bbox_hs[i] <= self.grid_size): - continue - # for each grid point, mark a small circle as positive - for j in range(self.grid_points): - factor_x, factor_y = factors[j] - gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( - 1 - factor_x) * pos_gt_bboxes[i, 2] - gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( - 1 - factor_y) * pos_gt_bboxes[i, 3] - - cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * - map_size) - cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * - map_size) - - for x in range(cx - radius, cx + radius + 1): - for y in range(cy - radius, cy + radius + 1): - if x >= 0 and x < map_size and y >= 0 and y < map_size: - if (x - cx)**2 + (y - cy)**2 <= radius2: - targets[i, j, y, x] = 1 - # reduce the target heatmap size by a half - # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). - sub_targets = [] - for i in range(self.grid_points): - sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] - sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) - sub_targets = torch.cat(sub_targets, dim=1) - sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device) - return sub_targets - - def loss(self, grid_pred: Tensor, sample_idx: Tensor, - sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigDict) -> dict: - """Calculate the loss based on the features extracted by the grid head. - - Args: - grid_pred (dict[str, Tensor]): Outputs of grid_head forward. - sample_idx (Tensor): The sampling index of ``grid_pred``. - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. - - Returns: - dict: A dictionary of loss and targets components. 
- """ - grid_targets = self.get_targets(sampling_results, rcnn_train_cfg) - grid_targets = grid_targets[sample_idx] - - loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) - loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) - loss_grid = loss_fused + loss_unfused - return dict(loss_grid=loss_grid) - - def predict_by_feat(self, - grid_preds: Dict[str, Tensor], - results_list: List[InstanceData], - batch_img_metas: List[dict], - rescale: bool = False) -> InstanceList: - """Adjust the predicted bboxes from bbox head. - - Args: - grid_preds (dict[str, Tensor]): dictionary outputted by forward - function. - results_list (list[:obj:`InstanceData`]): Detection results of - each image. - batch_img_metas (list[dict]): List of image information. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - - Returns: - list[:obj:`InstanceData`]: Detection results of each image - after the post process. Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape \ - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), the last \ - dimension 4 arrange as (x1, y1, x2, y2). - """ - num_roi_per_img = tuple(res.bboxes.size(0) for res in results_list) - grid_preds = { - k: v.split(num_roi_per_img, 0) - for k, v in grid_preds.items() - } - - for i, results in enumerate(results_list): - if len(results) != 0: - bboxes = self._predict_by_feat_single( - grid_pred=grid_preds['fused'][i], - bboxes=results.bboxes, - img_meta=batch_img_metas[i], - rescale=rescale) - results.bboxes = bboxes - return results_list - - def _predict_by_feat_single(self, - grid_pred: Tensor, - bboxes: Tensor, - img_meta: dict, - rescale: bool = False) -> Tensor: - """Adjust ``bboxes`` according to ``grid_pred``. - - Args: - grid_pred (Tensor): Grid fused heatmap. - bboxes (Tensor): Predicted bboxes, has shape (n, 4) - img_meta (dict): image information. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - - Returns: - Tensor: adjusted bboxes. 
- """ - assert bboxes.size(0) == grid_pred.size(0) - grid_pred = grid_pred.sigmoid() - - R, c, h, w = grid_pred.shape - half_size = self.whole_map_size // 4 * 2 - assert h == w == half_size - assert c == self.grid_points - - # find the point with max scores in the half-sized heatmap - grid_pred = grid_pred.view(R * c, h * w) - pred_scores, pred_position = grid_pred.max(dim=1) - xs = pred_position % w - ys = pred_position // w - - # get the position in the whole heatmap instead of half-sized heatmap - for i in range(self.grid_points): - xs[i::self.grid_points] += self.sub_regions[i][0] - ys[i::self.grid_points] += self.sub_regions[i][1] - - # reshape to (num_rois, grid_points) - pred_scores, xs, ys = tuple( - map(lambda x: x.view(R, c), [pred_scores, xs, ys])) - - # get expanded pos_bboxes - widths = (bboxes[:, 2] - bboxes[:, 0]).unsqueeze(-1) - heights = (bboxes[:, 3] - bboxes[:, 1]).unsqueeze(-1) - x1 = (bboxes[:, 0, None] - widths / 2) - y1 = (bboxes[:, 1, None] - heights / 2) - # map the grid point to the absolute coordinates - abs_xs = (xs.float() + 0.5) / w * widths + x1 - abs_ys = (ys.float() + 0.5) / h * heights + y1 - - # get the grid points indices that fall on the bbox boundaries - x1_inds = [i for i in range(self.grid_size)] - y1_inds = [i * self.grid_size for i in range(self.grid_size)] - x2_inds = [ - self.grid_points - self.grid_size + i - for i in range(self.grid_size) - ] - y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] - - # voting of all grid points on some boundary - bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) - bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) - bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) - bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) - - bboxes = torch.cat([bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2], dim=1) - bboxes[:, [0, 2]].clamp_(min=0, max=img_meta['img_shape'][1]) - bboxes[:, [1, 3]].clamp_(min=0, max=img_meta['img_shape'][0]) - - if rescale: - assert img_meta.get('scale_factor') is not None - bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat( - (1, 2)) - - return bboxes diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/__init__.py b/spaces/KyanChen/RSPrompter/mmpl/models/__init__.py deleted file mode 100644 index 4c0806f5b20bdf732ff752fe0937550484870f4b..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/models/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .builder import build_pler -from .pler import * -from .backbones import * -from .losses import * -from .heads import * -from .necks import * -from .data_preprocessors import * - -__all__ = ['build_pler'] \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmpl/structures/multi_task_data_sample.py b/spaces/KyanChen/RSPrompter/mmpl/structures/multi_task_data_sample.py deleted file mode 100644 index f00993861bfb4f35fb7d145198f81c5e9f0a5993..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/structures/multi_task_data_sample.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
- -from mmengine.structures import BaseDataElement - - -class MultiTaskDataSample(BaseDataElement): - - @property - def tasks(self): - return self._data_fields diff --git a/spaces/Lavanya30/hiddenhunger/pages/human.py b/spaces/Lavanya30/hiddenhunger/pages/human.py deleted file mode 100644 index f73b2a86ab38f2794e3a57bc20e6004c0589e1d4..0000000000000000000000000000000000000000 --- a/spaces/Lavanya30/hiddenhunger/pages/human.py +++ /dev/null @@ -1,102 +0,0 @@ -import cv2 -import numpy as np -import streamlit as st -import tensorflow as tf -from tensorflow.keras.preprocessing import image -from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2,preprocess_input as mobilenet_v2_preprocess_input -st.header("Hidden Hunger") -st.write("To find micronutrient deficiency in human using the images of nails and eyes") -st.markdown(""" - -""", unsafe_allow_html=True) -uploaded_file = st.file_uploader("Choose a image file", type="jpg") -model = tf.keras.models.load_model(r"models/resnet152v2nail.h5") -camera=st.button("Capture") -if camera: - # Function to process each frame of the video - def process_frame(frame): - # Convert the frame to grayscale - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - - # Apply a threshold to the grayscale image - _, thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY) - - return thresh - -# Create a VideoCapture object to capture video from the camera - cap = cv2.VideoCapture(0) - -# Set the dimensions of the video capture window - cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) - cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) - -# Define a function to capture video and display the results in the Streamlit app - def capture_video(): - while True: - # Read a frame from the camera - ret, frame = cap.read() - - # Process the frame - processed_frame = process_frame(frame) - - # Display the original and processed frames in the Streamlit app - st.image(np.hstack((frame, processed_frame)), width=640) - - # Check if the user has pressed the "Stop" button - if st.button('Stop'): - break - -# Call the function to capture video and display the results in the Streamlit app - capture_video() - -# Release the VideoCapture object and close the window - cap.release() - cv2.destroyAllWindows() -map_dict = {0: 'Iodine deficiency', - 1: 'Vitamin B12 deficiency', - 2: 'Vitamin D deficiency', - 3: 'Zinc deficiency', - 4: 'Healthy', - 5: 'Iron deficiency'} - -if uploaded_file is not None: - # Convert the file to an opencv image. - file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8) - opencv_image = cv2.imdecode(file_bytes, 1) - opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB) - resized = cv2.resize(opencv_image,(224,224)) - # Now do something with the image! For example, let's display it: - st.image(opencv_image, channels="RGB") - - resized = mobilenet_v2_preprocess_input(resized) - img_reshape = resized[np.newaxis,...] 
- - Genrate_pred = st.button("Generate Prediction") - if Genrate_pred: - prediction = model.predict(img_reshape).argmax() - st.title("Predicted Label for the image is {}".format(map_dict [prediction])) - diff --git a/spaces/Lbin123/Lbingo/src/components/tone-selector.tsx b/spaces/Lbin123/Lbingo/src/components/tone-selector.tsx deleted file mode 100644 index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/src/components/tone-selector.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React from 'react' -import { BingConversationStyle } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' - -type ToneItem = { - type: BingConversationStyle, - name: string -} - -const ToneList: ToneItem[] = [ - { name: '有创造力', type: BingConversationStyle.Creative }, - { name: '更平衡', type: BingConversationStyle.Balanced }, - { name: '更精确', type: BingConversationStyle.Precise } -] - -interface ToneSelectorProps { - type: BingConversationStyle | '' - onChange?: (type: BingConversationStyle) => void -} - -export function ToneSelector({ type, onChange }: ToneSelectorProps) { - return ( -
-
- 选择对话样式 -
-
-
    - { - ToneList.map(tone => ( -
  • onChange?.(tone.type)}> - -
  • - )) - } -
-
-
- ) -} diff --git a/spaces/Letheoricien/demo/README.md b/spaces/Letheoricien/demo/README.md deleted file mode 100644 index 69db85edfef0d7336fafd3a450f1a069fba83370..0000000000000000000000000000000000000000 --- a/spaces/Letheoricien/demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Demo -emoji: 💻 -colorFrom: yellow -colorTo: indigo -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/slot_gated_interaction.py b/spaces/LightChen2333/OpenSLU/model/decoder/interaction/slot_gated_interaction.py deleted file mode 100644 index 20735bdd69f0f2679a13a2d479d074045c62e039..0000000000000000000000000000000000000000 --- a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/slot_gated_interaction.py +++ /dev/null @@ -1,59 +0,0 @@ -import math - -import einops -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn import LayerNorm - -from common.utils import HiddenData -from model.decoder.interaction import BaseInteraction - - -class SlotGatedInteraction(BaseInteraction): - def __init__(self, **config): - super().__init__(**config) - self.intent_linear = nn.Linear(self.config["input_dim"],1, bias=False) - self.slot_linear1 = nn.Linear(self.config["input_dim"],1, bias=False) - self.slot_linear2 = nn.Linear(self.config["input_dim"],1, bias=False) - self.remove_slot_attn = self.config["remove_slot_attn"] - self.slot_gate = SlotGate(**config) - - def forward(self, encode_hidden: HiddenData, **kwargs): - input_hidden = encode_hidden.get_slot_hidden_state() - - seq_lens = encode_hidden.inputs.attention_mask.sum(-1) - output_list = [] - for index, slen in enumerate(seq_lens): - output_list.append(input_hidden[index, slen - 1, :].unsqueeze(0)) - intent_input = torch.cat(output_list, dim=0) - e_I = torch.tanh(self.intent_linear(intent_input)).squeeze(1) - alpha_I = einops.repeat(e_I, 'b -> b h', h=intent_input.shape[-1]) - c_I = alpha_I * intent_input - intent_hidden = intent_input+c_I - if not self.remove_slot_attn: - # slot attention - h_k = einops.repeat(self.slot_linear1(input_hidden), 'b l h -> b l c h', c=input_hidden.shape[1]) - h_i = einops.repeat(self.slot_linear2(input_hidden), 'b l h -> b l c h', c=input_hidden.shape[1]).transpose(1,2) - e_S = torch.tanh(h_k + h_i) - alpha_S = torch.softmax(e_S, dim=2).squeeze(3) - alpha_S = einops.repeat(alpha_S, 'b l1 l2 -> b l1 l2 h', h=input_hidden.shape[-1]) - map_input_hidden = einops.repeat(input_hidden, 'b l h -> b l c h', c=input_hidden.shape[1]) - c_S = torch.sum(alpha_S * map_input_hidden, dim=2) - else: - c_S = input_hidden - slot_hidden = input_hidden + c_S * self.slot_gate(c_S,c_I) - encode_hidden.update_intent_hidden_state(intent_hidden) - encode_hidden.update_slot_hidden_state(slot_hidden) - return encode_hidden - -class SlotGate(nn.Module): - def __init__(self, **config): - super().__init__() - self.linear = nn.Linear(config["input_dim"], config["output_dim"],bias=False) - self.v = nn.Parameter(torch.rand(size=[1])) - - def forward(self, slot_context, intent_context): - intent_gate = self.linear(intent_context) - intent_gate = einops.repeat(intent_gate, 'b h -> b l h', l=slot_context.shape[1]) - return self.v * torch.tanh(slot_context + intent_gate) diff --git a/spaces/MMMMQZ/MQZGPT/readme/README_en.md b/spaces/MMMMQZ/MQZGPT/readme/README_en.md deleted file mode 100644 index 
a906ecb3ebc411f5cdeb33d661266a489a20c3b0..0000000000000000000000000000000000000000 --- a/spaces/MMMMQZ/MQZGPT/readme/README_en.md +++ /dev/null @@ -1,127 +0,0 @@ -
- - 简体中文 | English | 日本語 -
- -

川虎 Chat 🐯 Chuanhu Chat

-
- - Logo - - -

-

Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA

-

- - Tests Passing - - - GitHub Contributors - - - GitHub pull requests - -

- Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
- LaTeX rendering / Table rendering / Code highlighting
- Auto dark mode / Adaptive web interface / WeChat-like theme
- Multi-parameters tuning / Multi-API-Key support / Multi-user support
- Compatible with GPT-4 / Local deployment for LLMs -

- Video Tutorial - · - 2.0 Introduction - · - 3.0 Introduction & Tutorial - || - Online trial - · - One-Click deployment -

-

- Animation Demo -

-

-
- -## Usage Tips - -- To better control the ChatGPT, use System Prompt. -- To use a Prompt Template, select the Prompt Template Collection file first, and then choose certain prompt from the drop-down menu. -- To try again if the response is unsatisfactory, use `🔄 Regenerate` button. -- To start a new line in the input box, press Shift + Enter keys. -- To quickly switch between input history, press and key in the input box. -- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=)`. -- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please be noted that the program must be running in order to be accessed via a public link. -- To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience. - -## Installation - -```shell -git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git -cd ChuanhuChatGPT -pip install -r requirements.txt -``` - -Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file. - -```shell -python ChuanhuChatbot.py -``` - -A browser window will open and you will be able to chat with ChatGPT. - -> **Note** -> -> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions. - -## Troubleshooting - -When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows: - -1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or - ```shell - git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f - ``` -2. Try installing the dependencies again (as this project may have introduced new dependencies) - ``` - pip install -r requirements.txt - ``` -3. Update Gradio - ``` - pip install gradio --upgrade --force-reinstall - ``` - -Generally, you can solve most problems by following these steps. - -If the problem still exists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) - -This page lists almost all the possible problems and solutions. Please read it carefully. 
- -## More Information - -More information could be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki): - -- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization) -- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南) -- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目) -- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志) -- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可) - -## Starchart - -[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date) - -## Contributors - - - - - -## Sponsor - -🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~ - -Buy Me A Coffee - -image diff --git a/spaces/Mackiemetal/dreamlike-photoreal-2.0/app.py b/spaces/Mackiemetal/dreamlike-photoreal-2.0/app.py deleted file mode 100644 index ebdb5095a0691dadeebfbd16dfdfeb5fa95a0400..0000000000000000000000000000000000000000 --- a/spaces/Mackiemetal/dreamlike-photoreal-2.0/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'dreamlike-art/dreamlike-photoreal-2.0' -prefix = '' - -scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div 
h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
-
-

Dreamlike Photoreal 2.0

-
-

- Demo for Dreamlike Photoreal 2.0 Stable Diffusion model.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""} -

- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space

- Duplicate Space -
- """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
-
-

This space was created using SD Space Creator.

-
- """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/Moxxie-nolastname/Not-Moxxie-Proxy/Dockerfile b/spaces/Moxxie-nolastname/Not-Moxxie-Proxy/Dockerfile deleted file mode 100644 index 4cb0ce42128d9a2ad33a395883f5e5455a38c707..0000000000000000000000000000000000000000 --- a/spaces/Moxxie-nolastname/Not-Moxxie-Proxy/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/Mradul/mlrc-bana/README.md b/spaces/Mradul/mlrc-bana/README.md deleted file mode 100644 index 9dd04df84cbf56fccb03cb401ba1b76db674f792..0000000000000000000000000000000000000000 --- a/spaces/Mradul/mlrc-bana/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Mlrc Bana -emoji: 🐢 -colorFrom: pink -colorTo: pink -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/MuhammedAyman29/Fruits/app.py b/spaces/MuhammedAyman29/Fruits/app.py deleted file mode 100644 index ee4563bb0f0a92906a5d8b39d34410258e1e2883..0000000000000000000000000000000000000000 --- a/spaces/MuhammedAyman29/Fruits/app.py +++ /dev/null @@ -1,24 +0,0 @@ -import pathlib -temp = pathlib.WindowsPath -pathlib.WindowsPath = pathlib.PosixPath - -import gradio as gr -from fastai.vision.all import * -import skimage - - -learn= load_learner('export.pkl') -def predict(img): - #img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -examples = ['Apple.jpg', 'banana.jpg'] -labels = learn.dls.vocab -title = "Fruits Classifier" -description = "Fruits classifier with fastai. Created as a demo for Gradio and HuggingFace Spaces." -interpretation='default' -enable_queue=True - - -gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(192, 192)),outputs=gr.outputs.Label(num_top_classes=4),title=title,description=description,interpretation=interpretation,enable_queue=enable_queue).launch() \ No newline at end of file diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py deleted file mode 100644 index 2faf6ea2cfb9f0d71d0a79dff101e0408fa41778..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A converter from a V1 BERT encoder checkpoint to a V2 encoder checkpoint. - -The conversion will yield an object-oriented checkpoint that can be used -to restore a TransformerEncoder object. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -from absl import app -from absl import flags - -import tensorflow as tf -from official.modeling import activations -from official.nlp.bert import configs -from official.nlp.bert import tf1_checkpoint_converter_lib -from official.nlp.modeling import networks - -FLAGS = flags.FLAGS - -flags.DEFINE_string("bert_config_file", None, - "Bert configuration file to define core bert layers.") -flags.DEFINE_string( - "checkpoint_to_convert", None, - "Initial checkpoint from a pretrained BERT model core (that is, only the " - "BertModel, with no task heads.)") -flags.DEFINE_string("converted_checkpoint_path", None, - "Name for the created object-based V2 checkpoint.") - - -def _create_bert_model(cfg): - """Creates a BERT keras core model from BERT configuration. - - Args: - cfg: A `BertConfig` to create the core model. - Returns: - A TransformerEncoder netowork. - """ - bert_encoder = networks.TransformerEncoder( - vocab_size=cfg.vocab_size, - hidden_size=cfg.hidden_size, - num_layers=cfg.num_hidden_layers, - num_attention_heads=cfg.num_attention_heads, - intermediate_size=cfg.intermediate_size, - activation=activations.gelu, - dropout_rate=cfg.hidden_dropout_prob, - attention_dropout_rate=cfg.attention_probs_dropout_prob, - sequence_length=cfg.max_position_embeddings, - type_vocab_size=cfg.type_vocab_size, - initializer=tf.keras.initializers.TruncatedNormal( - stddev=cfg.initializer_range), - embedding_width=cfg.embedding_size) - - return bert_encoder - - -def convert_checkpoint(bert_config, output_path, v1_checkpoint): - """Converts a V1 checkpoint into an OO V2 checkpoint.""" - output_dir, _ = os.path.split(output_path) - - # Create a temporary V1 name-converted checkpoint in the output directory. - temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1") - temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt") - tf1_checkpoint_converter_lib.convert( - checkpoint_from_path=v1_checkpoint, - checkpoint_to_path=temporary_checkpoint, - num_heads=bert_config.num_attention_heads, - name_replacements=tf1_checkpoint_converter_lib.BERT_V2_NAME_REPLACEMENTS, - permutations=tf1_checkpoint_converter_lib.BERT_V2_PERMUTATIONS, - exclude_patterns=["adam", "Adam"]) - - # Create a V2 checkpoint from the temporary checkpoint. - model = _create_bert_model(bert_config) - tf1_checkpoint_converter_lib.create_v2_checkpoint(model, temporary_checkpoint, - output_path) - - # Clean up the temporary checkpoint, if it exists. 
- try: - tf.io.gfile.rmtree(temporary_checkpoint_dir) - except tf.errors.OpError: - # If it doesn't exist, we don't need to clean it up; continue. - pass - - -def main(_): - output_path = FLAGS.converted_checkpoint_path - v1_checkpoint = FLAGS.checkpoint_to_convert - bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) - convert_checkpoint(bert_config, output_path, v1_checkpoint) - - -if __name__ == "__main__": - app.run(main) diff --git a/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_test.py b/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_test.py deleted file mode 100644 index 9541ee3f8bb4c65fb1f69070fa3876ee51b6c191..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_test.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright 2018 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test NCF data pipeline.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import defaultdict -import hashlib -import os - -import mock -import numpy as np -import scipy.stats -import tensorflow as tf - -from official.recommendation import constants as rconst -from official.recommendation import data_preprocessing -from official.recommendation import movielens -from official.recommendation import popen_helper - - -DATASET = "ml-test" -NUM_USERS = 1000 -NUM_ITEMS = 2000 -NUM_PTS = 50000 -BATCH_SIZE = 2048 -EVAL_BATCH_SIZE = 4000 -NUM_NEG = 4 - - -END_TO_END_TRAIN_MD5 = "b218738e915e825d03939c5e305a2698" -END_TO_END_EVAL_MD5 = "d753d0f3186831466d6e218163a9501e" -FRESH_RANDOMNESS_MD5 = "63d0dff73c0e5f1048fbdc8c65021e22" - - -def mock_download(*args, **kwargs): - return - - -# The forkpool used by data producers interacts badly with the threading -# used by TestCase. Without this patch tests will hang, and no amount -# of diligent closing and joining within the producer will prevent it. 
-@mock.patch.object(popen_helper, "get_forkpool", popen_helper.get_fauxpool) -class BaseTest(tf.test.TestCase): - - def setUp(self): - tf.compat.v1.disable_eager_execution() - self.temp_data_dir = self.get_temp_dir() - ratings_folder = os.path.join(self.temp_data_dir, DATASET) - tf.io.gfile.makedirs(ratings_folder) - np.random.seed(0) - raw_user_ids = np.arange(NUM_USERS * 3) - np.random.shuffle(raw_user_ids) - raw_user_ids = raw_user_ids[:NUM_USERS] - - raw_item_ids = np.arange(NUM_ITEMS * 3) - np.random.shuffle(raw_item_ids) - raw_item_ids = raw_item_ids[:NUM_ITEMS] - - users = np.random.choice(raw_user_ids, NUM_PTS) - items = np.random.choice(raw_item_ids, NUM_PTS) - scores = np.random.randint(low=0, high=5, size=NUM_PTS) - times = np.random.randint(low=1000000000, high=1200000000, size=NUM_PTS) - - self.rating_file = os.path.join(ratings_folder, movielens.RATINGS_FILE) - self.seen_pairs = set() - self.holdout = {} - with tf.io.gfile.GFile(self.rating_file, "w") as f: - f.write("user_id,item_id,rating,timestamp\n") - for usr, itm, scr, ts in zip(users, items, scores, times): - pair = (usr, itm) - if pair in self.seen_pairs: - continue - self.seen_pairs.add(pair) - if usr not in self.holdout or (ts, itm) > self.holdout[usr]: - self.holdout[usr] = (ts, itm) - - f.write("{},{},{},{}\n".format(usr, itm, scr, ts)) - - movielens.download = mock_download - movielens.NUM_RATINGS[DATASET] = NUM_PTS - movielens.DATASET_TO_NUM_USERS_AND_ITEMS[DATASET] = (NUM_USERS, NUM_ITEMS) - - def make_params(self, train_epochs=1): - return { - "train_epochs": train_epochs, - "batches_per_step": 1, - "use_seed": False, - "batch_size": BATCH_SIZE, - "eval_batch_size": EVAL_BATCH_SIZE, - "num_neg": NUM_NEG, - "match_mlperf": True, - "use_tpu": False, - "use_xla_for_gpu": False, - "stream_files": False, - } - - def test_preprocessing(self): - # For the most part the necessary checks are performed within - # _filter_index_sort() - - cache_path = os.path.join(self.temp_data_dir, "test_cache.pickle") - data, valid_cache = data_preprocessing._filter_index_sort( - self.rating_file, cache_path=cache_path) - - assert len(data[rconst.USER_MAP]) == NUM_USERS - assert len(data[rconst.ITEM_MAP]) == NUM_ITEMS - - def drain_dataset(self, dataset, g): - # type: (tf.data.Dataset, tf.Graph) -> list - with self.session(graph=g) as sess: - with g.as_default(): - batch = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next() - output = [] - while True: - try: - output.append(sess.run(batch)) - except tf.errors.OutOfRangeError: - break - return output - - def _test_end_to_end(self, constructor_type): - params = self.make_params(train_epochs=1) - _, _, producer = data_preprocessing.instantiate_pipeline( - dataset=DATASET, data_dir=self.temp_data_dir, params=params, - constructor_type=constructor_type, deterministic=True) - - producer.start() - producer.join() - assert producer._fatal_exception is None - - user_inv_map = {v: k for k, v in producer.user_map.items()} - item_inv_map = {v: k for k, v in producer.item_map.items()} - - # ========================================================================== - # == Training Data ========================================================= - # ========================================================================== - g = tf.Graph() - with g.as_default(): - input_fn = producer.make_input_fn(is_training=True) - dataset = input_fn(params) - - first_epoch = self.drain_dataset(dataset=dataset, g=g) - - counts = defaultdict(int) - train_examples = { - True: set(), - False: set(), - } - - 
md5 = hashlib.md5() - for features, labels in first_epoch: - data_list = [ - features[movielens.USER_COLUMN].flatten(), - features[movielens.ITEM_COLUMN].flatten(), - features[rconst.VALID_POINT_MASK].flatten(), - labels.flatten() - ] - for i in data_list: - md5.update(i.tobytes()) - - for u, i, v, l in zip(*data_list): - if not v: - continue # ignore padding - - u_raw = user_inv_map[u] - i_raw = item_inv_map[i] - if ((u_raw, i_raw) in self.seen_pairs) != l: - # The evaluation item is not considered during false negative - # generation, so it will occasionally appear as a negative example - # during training. - assert not l - self.assertEqual(i_raw, self.holdout[u_raw][1]) - train_examples[l].add((u_raw, i_raw)) - counts[(u_raw, i_raw)] += 1 - - self.assertRegexpMatches(md5.hexdigest(), END_TO_END_TRAIN_MD5) - - num_positives_seen = len(train_examples[True]) - self.assertEqual(producer._train_pos_users.shape[0], num_positives_seen) - - # This check is more heuristic because negatives are sampled with - # replacement. It only checks that negative generation is reasonably random. - self.assertGreater( - len(train_examples[False]) / NUM_NEG / num_positives_seen, 0.9) - - # This checks that the samples produced are independent by checking the - # number of duplicate entries. If workers are not properly independent there - # will be lots of repeated pairs. - self.assertLess(np.mean(list(counts.values())), 1.1) - - # ========================================================================== - # == Eval Data ============================================================= - # ========================================================================== - with g.as_default(): - input_fn = producer.make_input_fn(is_training=False) - dataset = input_fn(params) - - eval_data = self.drain_dataset(dataset=dataset, g=g) - - current_user = None - md5 = hashlib.md5() - for features in eval_data: - data_list = [ - features[movielens.USER_COLUMN].flatten(), - features[movielens.ITEM_COLUMN].flatten(), - features[rconst.DUPLICATE_MASK].flatten() - ] - for i in data_list: - md5.update(i.tobytes()) - - for idx, (u, i, d) in enumerate(zip(*data_list)): - u_raw = user_inv_map[u] - i_raw = item_inv_map[i] - if current_user is None: - current_user = u - - # Ensure that users appear in blocks, as the evaluation logic expects - # this structure. - self.assertEqual(u, current_user) - - # The structure of evaluation data is 999 negative examples followed - # by the holdout positive. - if not (idx + 1) % (rconst.NUM_EVAL_NEGATIVES + 1): - # Check that the last element in each chunk is the holdout item. - self.assertEqual(i_raw, self.holdout[u_raw][1]) - current_user = None - - elif i_raw == self.holdout[u_raw][1]: - # Because the holdout item is not given to the negative generation - # process, it can appear as a negative. In that case, it should be - # masked out as a duplicate. (Since the true positive is placed at - # the end and would therefore lose the tie.) - assert d - - else: - # Otherwise check that the other 999 points for a user are selected - # from the negatives. 
- assert (u_raw, i_raw) not in self.seen_pairs - - self.assertRegexpMatches(md5.hexdigest(), END_TO_END_EVAL_MD5) - - def _test_fresh_randomness(self, constructor_type): - train_epochs = 5 - params = self.make_params(train_epochs=train_epochs) - _, _, producer = data_preprocessing.instantiate_pipeline( - dataset=DATASET, data_dir=self.temp_data_dir, params=params, - constructor_type=constructor_type, deterministic=True) - - producer.start() - - results = [] - g = tf.Graph() - with g.as_default(): - for _ in range(train_epochs): - input_fn = producer.make_input_fn(is_training=True) - dataset = input_fn(params) - results.extend(self.drain_dataset(dataset=dataset, g=g)) - - producer.join() - assert producer._fatal_exception is None - - positive_counts, negative_counts = defaultdict(int), defaultdict(int) - md5 = hashlib.md5() - for features, labels in results: - data_list = [ - features[movielens.USER_COLUMN].flatten(), - features[movielens.ITEM_COLUMN].flatten(), - features[rconst.VALID_POINT_MASK].flatten(), - labels.flatten() - ] - for i in data_list: - md5.update(i.tobytes()) - - for u, i, v, l in zip(*data_list): - if not v: - continue # ignore padding - - if l: - positive_counts[(u, i)] += 1 - else: - negative_counts[(u, i)] += 1 - - self.assertRegexpMatches(md5.hexdigest(), FRESH_RANDOMNESS_MD5) - - # The positive examples should appear exactly once each epoch - self.assertAllEqual(list(positive_counts.values()), - [train_epochs for _ in positive_counts]) - - # The threshold for the negatives is heuristic, but in general repeats are - # expected, but should not appear too frequently. - - pair_cardinality = NUM_USERS * NUM_ITEMS - neg_pair_cardinality = pair_cardinality - len(self.seen_pairs) - - # Approximation for the expectation number of times that a particular - # negative will appear in a given epoch. Implicit in this calculation is the - # treatment of all negative pairs as equally likely. Normally is not - # necessarily reasonable; however the generation in self.setUp() will - # approximate this behavior sufficiently for heuristic testing. - e_sample = len(self.seen_pairs) * NUM_NEG / neg_pair_cardinality - - # The frequency of occurance of a given negative pair should follow an - # approximately binomial distribution in the limit that the cardinality of - # the negative pair set >> number of samples per epoch. - approx_pdf = scipy.stats.binom.pmf(k=np.arange(train_epochs+1), - n=train_epochs, p=e_sample) - - # Tally the actual observed counts. - count_distribution = [0 for _ in range(train_epochs + 1)] - for i in negative_counts.values(): - i = min([i, train_epochs]) # round down tail for simplicity. - count_distribution[i] += 1 - count_distribution[0] = neg_pair_cardinality - sum(count_distribution[1:]) - - # Check that the frequency of negative pairs is approximately binomial. - for i in range(train_epochs + 1): - if approx_pdf[i] < 0.05: - continue # Variance will be high at the tails. 
- - observed_fraction = count_distribution[i] / neg_pair_cardinality - deviation = (2 * abs(observed_fraction - approx_pdf[i]) / - (observed_fraction + approx_pdf[i])) - - self.assertLess(deviation, 0.2) - - def test_end_to_end_materialized(self): - self._test_end_to_end("materialized") - - def test_end_to_end_bisection(self): - self._test_end_to_end("bisection") - - def test_fresh_randomness_materialized(self): - self._test_fresh_randomness("materialized") - - def test_fresh_randomness_bisection(self): - self._test_fresh_randomness("bisection") - - -if __name__ == "__main__": - tf.test.main() diff --git a/spaces/Natsha/mocap-ai/README.md b/spaces/Natsha/mocap-ai/README.md deleted file mode 100644 index 6e9094ffd8b8c3b8d102c171110b072c9457043d..0000000000000000000000000000000000000000 --- a/spaces/Natsha/mocap-ai/README.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Optical Motion Capture AI -sdk: docker -app_port: 7860 -app_file: app.py ---- - -# mocap-ai -Functionality to load FBX files, extract animation, process the animation and write it back to the file. - -# Classifier -* Globals: file with hardcoded values like the marker names. -* Utilities: - * Visualizations -* FBX Handler: - * Load the `.fbx` file. - * Go through each frame in the animation frame range and check if all skeleton nodes have a keyframe there. - * If a keyframe is missing, remove that frame number from the valid frame numbers. - * After finding all valid frames, go through all marker translation channels and store the global transform in a `pandas` DataFrame. - * Add the actor numbers as categorical variables. - * Save the DataFrame to a `.csv` file. -* Inference file loader - * Same as training file loader, but this one should process all frames regardless of keyframe presence. -* Data augmentation: - * Isolate a marker set. - * Translate and rotate (optionally scale) with boundary check. -* Model builder: - * Instantiate a model with various hyperparameters. -* Training loop: - * Train given model with callbacks. -* Test loop: - * Validate model on validation/test data. -* Development script: - * Create new model, train it and test it. -* Deployment script: - * Deploys the model in a Docker image on HuggingFace. - - -## References: -1. PointNet: -- Research paper: Qi, Charles R., et al. "PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation." CVPR. 2017. [arXiv:1612.00593](https://arxiv.org/abs/1612.00593) -- Official code repository (TensorFlow): https://github.com/charlesq34/pointnet -- Official code repository (PyTorch): https://github.com/fxia22/pointnet.pytorch -2. PointNet++: -- Research paper: Qi, Charles R., et al. "PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space." NeurIPS. 2017. [arXiv:1706.02413](https://arxiv.org/abs/1706.02413) -- Official code repository (TensorFlow): https://github.com/charlesq34/pointnet2 -- Official code repository (PyTorch): https://github.com/erikwijmans/Pointnet2_PyTorch -3. DGCNN: -- Research paper: Wang, Yue, et al. "Dynamic Graph CNN for Learning on Point Clouds." ACM Transactions on Graphics (TOG) 38.5 (2019): 1-12. 
[arXiv:1801.07829](https://arxiv.org/abs/1801.07829) -- Official code repository (TensorFlow): https://github.com/WangYueFt/dgcnn -- Official code repository (PyTorch): https://github.com/muhanzhang/DGCNN \ No newline at end of file diff --git a/spaces/Nee001/bing0/src/lib/hooks/chat-history.ts b/spaces/Nee001/bing0/src/lib/hooks/chat-history.ts deleted file mode 100644 index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/lib/hooks/chat-history.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { zip } from 'lodash-es' -import { ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { Storage } from '../storage' - -/** - * conversations:$botId => Conversation[] - * conversation:$botId:$cid:messages => ChatMessageModel[] - */ - -interface Conversation { - id: string - createdAt: number -} - -type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] } - -async function loadHistoryConversations(botId: BotId): Promise { - const key = `conversations:${botId}` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -async function deleteHistoryConversation(botId: BotId, cid: string) { - const conversations = await loadHistoryConversations(botId) - const newConversations = conversations.filter((c) => c.id !== cid) - await Storage.set({ [`conversations:${botId}`]: newConversations }) -} - -async function loadConversationMessages(botId: BotId, cid: string): Promise { - const key = `conversation:${botId}:${cid}:messages` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) { - const conversations = await loadHistoryConversations(botId) - if (!conversations.some((c) => c.id === cid)) { - conversations.unshift({ id: cid, createdAt: Date.now() }) - await Storage.set({ [`conversations:${botId}`]: conversations }) - } - const key = `conversation:${botId}:${cid}:messages` - await Storage.set({ [key]: messages }) -} - -export async function loadHistoryMessages(botId: BotId): Promise { - const conversations = await loadHistoryConversations(botId) - const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id))) - return zip(conversations, messagesList).map(([c, messages]) => ({ - id: c!.id, - createdAt: c!.createdAt, - messages: messages!, - })) -} - -export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) { - const messages = await loadConversationMessages(botId, conversationId) - const newMessages = messages.filter((m) => m.id !== messageId) - await setConversationMessages(botId, conversationId, newMessages) - if (!newMessages.length) { - await deleteHistoryConversation(botId, conversationId) - } -} diff --git a/spaces/NimaBoscarino/climategan/climategan/depth.py b/spaces/NimaBoscarino/climategan/climategan/depth.py deleted file mode 100644 index d8d408448b82b1d11043131b61897b8467192e65..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/climategan/climategan/depth.py +++ /dev/null @@ -1,230 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from climategan.blocks import BaseDecoder, Conv2dBlock, InterpolateNearest2d -from climategan.utils import find_target_size - - -def create_depth_decoder(opts, no_init=False, verbose=0): - if opts.gen.d.architecture == "base": - decoder = BaseDepthDecoder(opts) - if "s" in opts.task: - assert opts.gen.s.use_dada 
is False - if "m" in opts.tasks: - assert opts.gen.m.use_dada is False - else: - decoder = DADADepthDecoder(opts) - - if verbose > 0: - print(f" - Add {decoder.__class__.__name__}") - - return decoder - - -class DADADepthDecoder(nn.Module): - """ - Depth decoder based on depth auxiliary task in DADA paper - """ - - def __init__(self, opts): - super().__init__() - if ( - opts.gen.encoder.architecture == "deeplabv3" - and opts.gen.deeplabv3.backbone == "mobilenet" - ): - res_dim = 320 - else: - res_dim = 2048 - - mid_dim = 512 - - self.do_feat_fusion = False - if opts.gen.m.use_dada or ("s" in opts.tasks and opts.gen.s.use_dada): - self.do_feat_fusion = True - self.dec4 = Conv2dBlock( - 128, - res_dim, - 1, - stride=1, - padding=0, - bias=True, - activation="lrelu", - norm="none", - ) - - self.relu = nn.ReLU(inplace=True) - self.enc4_1 = Conv2dBlock( - res_dim, - mid_dim, - 1, - stride=1, - padding=0, - bias=False, - activation="lrelu", - pad_type="reflect", - norm="batch", - ) - self.enc4_2 = Conv2dBlock( - mid_dim, - mid_dim, - 3, - stride=1, - padding=1, - bias=False, - activation="lrelu", - pad_type="reflect", - norm="batch", - ) - self.enc4_3 = Conv2dBlock( - mid_dim, - 128, - 1, - stride=1, - padding=0, - bias=False, - activation="lrelu", - pad_type="reflect", - norm="batch", - ) - self.upsample = None - if opts.gen.d.upsample_featuremaps: - self.upsample = nn.Sequential( - *[ - InterpolateNearest2d(), - Conv2dBlock( - 128, - 32, - 3, - stride=1, - padding=1, - bias=False, - activation="lrelu", - pad_type="reflect", - norm="batch", - ), - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - ] - ) - self._target_size = find_target_size(opts, "d") - print( - " - {}: setting target size to {}".format( - self.__class__.__name__, self._target_size - ) - ) - - def set_target_size(self, size): - """ - Set final interpolation's target size - - Args: - size (int, list, tuple): target size (h, w). 
If int, target will be (i, i) - """ - if isinstance(size, (list, tuple)): - self._target_size = size[:2] - else: - self._target_size = (size, size) - - def forward(self, z): - if isinstance(z, (list, tuple)): - z = z[0] - z4_enc = self.enc4_1(z) - z4_enc = self.enc4_2(z4_enc) - z4_enc = self.enc4_3(z4_enc) - - z_depth = None - if self.do_feat_fusion: - z_depth = self.dec4(z4_enc) - - if self.upsample is not None: - z4_enc = self.upsample(z4_enc) - - depth = torch.mean(z4_enc, dim=1, keepdim=True) # DADA paper decoder - if depth.shape[-1] != self._target_size: - depth = F.interpolate( - depth, - size=(384, 384), # size used in MiDaS inference - mode="bicubic", # what MiDaS uses - align_corners=False, - ) - - depth = F.interpolate( - depth, (self._target_size, self._target_size), mode="nearest" - ) # what we used in the transforms to resize input - - return depth, z_depth - - def __str__(self): - return "DADA Depth Decoder" - - -class BaseDepthDecoder(BaseDecoder): - def __init__(self, opts): - low_level_feats_dim = -1 - use_v3 = opts.gen.encoder.architecture == "deeplabv3" - use_mobile_net = opts.gen.deeplabv3.backbone == "mobilenet" - use_low = opts.gen.d.use_low_level_feats - - if use_v3 and use_mobile_net: - input_dim = 320 - if use_low: - low_level_feats_dim = 24 - elif use_v3: - input_dim = 2048 - if use_low: - low_level_feats_dim = 256 - else: - input_dim = 2048 - - n_upsample = 1 if opts.gen.d.upsample_featuremaps else 0 - output_dim = ( - 1 - if not opts.gen.d.classify.enable - else opts.gen.d.classify.linspace.buckets - ) - - self._target_size = find_target_size(opts, "d") - print( - " - {}: setting target size to {}".format( - self.__class__.__name__, self._target_size - ) - ) - - super().__init__( - n_upsample=n_upsample, - n_res=opts.gen.d.n_res, - input_dim=input_dim, - proj_dim=opts.gen.d.proj_dim, - output_dim=output_dim, - norm=opts.gen.d.norm, - activ=opts.gen.d.activ, - pad_type=opts.gen.d.pad_type, - output_activ="none", - low_level_feats_dim=low_level_feats_dim, - ) - - def set_target_size(self, size): - """ - Set final interpolation's target size - - Args: - size (int, list, tuple): target size (h, w). If int, target will be (i, i) - """ - if isinstance(size, (list, tuple)): - self._target_size = size[:2] - else: - self._target_size = (size, size) - - def forward(self, z, cond=None): - if self._target_size is None: - error = "self._target_size should be set with self.set_target_size()" - error += "to interpolate depth to the target depth map's size" - raise ValueError(error) - - d = super().forward(z) - - preds = F.interpolate( - d, size=self._target_size, mode="bilinear", align_corners=True - ) - - return preds, None diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/archs/discriminator_arch.py b/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/archs/discriminator_arch.py deleted file mode 100644 index ccd810559201624bc6c20ea9b60009b927ecadd6..0000000000000000000000000000000000000000 --- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/archs/discriminator_arch.py +++ /dev/null @@ -1,67 +0,0 @@ -from basicsr.utils.registry import ARCH_REGISTRY -from torch import nn as nn -from torch.nn import functional as F -from torch.nn.utils import spectral_norm - - -@ARCH_REGISTRY.register() -class UNetDiscriminatorSN(nn.Module): - """Defines a U-Net discriminator with spectral normalization (SN) - - It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. 
- - Arg: - num_in_ch (int): Channel number of inputs. Default: 3. - num_feat (int): Channel number of base intermediate features. Default: 64. - skip_connection (bool): Whether to use skip connections between U-Net. Default: True. - """ - - def __init__(self, num_in_ch, num_feat=64, skip_connection=True): - super(UNetDiscriminatorSN, self).__init__() - self.skip_connection = skip_connection - norm = spectral_norm - # the first convolution - self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1) - # downsample - self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False)) - self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False)) - self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False)) - # upsample - self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)) - self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)) - self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False)) - # extra convolutions - self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1) - - def forward(self, x): - # downsample - x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True) - x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True) - x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True) - x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True) - - # upsample - x3 = F.interpolate(x3, scale_factor=2, mode="bilinear", align_corners=False) - x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x4 = x4 + x2 - x4 = F.interpolate(x4, scale_factor=2, mode="bilinear", align_corners=False) - x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x5 = x5 + x1 - x5 = F.interpolate(x5, scale_factor=2, mode="bilinear", align_corners=False) - x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x6 = x6 + x0 - - # extra convolutions - out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True) - out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True) - out = self.conv9(out) - - return out diff --git a/spaces/OAOA/DifFace/basicsr/ops/fused_act/__init__.py b/spaces/OAOA/DifFace/basicsr/ops/fused_act/__init__.py deleted file mode 100644 index 241dc0754fae7d88dbbd9a02e665ca30a73c7422..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/ops/fused_act/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .fused_act import FusedLeakyReLU, fused_leaky_relu - -__all__ = ['FusedLeakyReLU', 'fused_leaky_relu'] diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/README.md deleted file mode 100644 index 7a76ffd57c066c20af94aa3fca24c18e2ba4c3dd..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Generative Spoken Language Modeling - -* [Paper](https://arxiv.org/abs/2102.01192) -* [Demo](https://speechbot.github.io/gslm/index.html) - -We build and evaluate generative speech2speech systems using [Log Mel Filtebank](https://pytorch.org/audio/stable/compliance.kaldi.html#fbank), [Modified 
CPC](https://github.com/facebookresearch/CPC_audio), [HuBERT Base](https://github.com/pytorch/fairseq/tree/main/examples/hubert) and [Wav2Vec 2.0 Large](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec). Our system is composed of three components, namely, *speech2unit*, *ulm* and *unit2speech*. We explain about models and usage of these components in their respective sub-directories. See the links below. - -## Speech to Unit Model (speech2unit) -Speech to unit model is used for quantizing raw speech into learned discrete speech units. [More details](speech2unit) - -## Unit Language Model (ulm) -Unit Language Model is a generative language model trained on discrete speech units. [More details](ulm) - -## Unit to Speech Model (unit2speech) -Unit to speech model is used for synthesizing speech from discrete speech units. [More details](unit2speech) - -## Metrics -We show how to compute ASR based metrics as well as zero-shot metrics proposed in our paper [here](metrics). - -## Tools -We share two tools to resynthesize a given spoken utterance, and generate novel spoken language given a spoken prompt. [More detail](tools) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py deleted file mode 100644 index 70d0016663b7d0b90033f4eb301b527f2c92a3f8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os -import sys - -import soundfile as sf -import torch -import torchaudio - -from feature_utils import get_path_iterator, dump_feature - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_mfcc_feature") - - -class MfccFeatureReader(object): - def __init__(self, sample_rate): - self.sample_rate = sample_rate - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - assert sr == self.sample_rate, sr - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - if ref_len is not None and abs(ref_len - len(wav)) > 160: - logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, path, ref_len=None): - x = self.read_audio(path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float() - x = x.view(1, -1) - - mfccs = torchaudio.compliance.kaldi.mfcc( - waveform=x, - sample_frequency=self.sample_rate, - use_energy=False, - ) # (time, freq) - mfccs = mfccs.transpose(0, 1) # (freq, time) - deltas = torchaudio.functional.compute_deltas(mfccs) - ddeltas = torchaudio.functional.compute_deltas(deltas) - concat = torch.cat([mfccs, deltas, ddeltas], dim=0) - concat = concat.transpose(0, 1).contiguous() # (freq, time) - return concat - - -def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate): - reader = MfccFeatureReader(sample_rate) - generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) - dump_feature(reader, generator, num, split, nshard, rank, feat_dir) - - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - 
parser.add_argument("tsv_dir")
-    parser.add_argument("split")
-    parser.add_argument("nshard", type=int)
-    parser.add_argument("rank", type=int)
-    parser.add_argument("feat_dir")
-    parser.add_argument("--sample_rate", type=int, default=16000)
-    args = parser.parse_args()
-    logger.info(args)
-
-    main(**vars(args))
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/nonautoregressive_translation/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/nonautoregressive_translation/README.md
deleted file mode 100644
index 8793e225c99732c42c9c19e22075cde37c73341d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/nonautoregressive_translation/README.md
+++ /dev/null
@@ -1,146 +0,0 @@
-# Non-autoregressive Neural Machine Translation (NAT)
-
-This page mainly includes instructions for reproducing results from the following papers:
-* [Levenshtein Transformer (Gu et al., 2019)](https://arxiv.org/abs/1905.11006).
-* [Understanding Knowledge Distillation in Non-autoregressive Machine Translation (Zhou et al., 2019)](https://arxiv.org/abs/1911.02727).
-
-We also provide our own implementations of several popular non-autoregressive models for reference:
-* [Non-Autoregressive Neural Machine Translation (Gu et al., 2017)](https://arxiv.org/abs/1711.02281)
-* [Deterministic Non-Autoregressive Neural Sequence Modeling by Iterative Refinement (Lee et al., 2018)](https://arxiv.org/abs/1802.06901)
-* [Insertion Transformer: Flexible Sequence Generation via Insertion Operations (Stern et al., 2019)](https://arxiv.org/abs/1902.03249)
-* [Mask-Predict: Parallel Decoding of Conditional Masked Language Models (Ghazvininejad et al., 2019)](https://arxiv.org/abs/1904.09324v2)
-* [Fast Structured Decoding for Sequence Models (Sun et al., 2019)](https://arxiv.org/abs/1910.11555)
-
-## Dataset
-
-First, follow the [instructions to download and preprocess the WMT'14 En-De dataset](../translation#wmt14-english-to-german-convolutional).
-Make sure to learn a joint vocabulary by passing the `--joined-dictionary` option to `fairseq-preprocess`.
-
-### Knowledge Distillation
-Following [Gu et al. 2019](https://arxiv.org/abs/1905.11006), [knowledge distillation](https://arxiv.org/abs/1606.07947) from an autoregressive model can effectively simplify the training data distribution, which is sometimes essential for NAT-based models to learn good translations.
-The easiest way to perform distillation is to follow the [instructions for training a standard transformer model](../translation) on the same data, and then decode the training set to produce a distillation dataset for NAT.
-
-### Download
-We also provide the preprocessed [original](http://dl.fbaipublicfiles.com/nat/original_dataset.zip) and [distillation](http://dl.fbaipublicfiles.com/nat/distill_dataset.zip) datasets. Please build the binarized dataset on your own.
-
-
-## Train a model
-
-We can then train a non-autoregressive model using the `translation_lev` task and a new criterion, `nat_loss`.
-Use the `--noise` flag to specify the input noise applied to the target sentences.
-By default, the task is set up for the *Levenshtein Transformer*, with `--noise='random_delete'`. Full scripts for running the other models can be found [here](./scripts.md).
-
-The following command trains a *Levenshtein Transformer* on the binarized dataset.
-
-```bash
-fairseq-train \
-    data-bin/wmt14_en_de_distill \
-    --save-dir checkpoints \
-    --ddp-backend=legacy_ddp \
-    --task translation_lev \
-    --criterion nat_loss \
-    --arch levenshtein_transformer \
-    --noise random_delete \
-    --share-all-embeddings \
-    --optimizer adam --adam-betas '(0.9,0.98)' \
-    --lr 0.0005 --lr-scheduler inverse_sqrt \
-    --stop-min-lr '1e-09' --warmup-updates 10000 \
-    --warmup-init-lr '1e-07' --label-smoothing 0.1 \
-    --dropout 0.3 --weight-decay 0.01 \
-    --decoder-learned-pos \
-    --encoder-learned-pos \
-    --apply-bert-init \
-    --log-format 'simple' --log-interval 100 \
-    --fixed-validation-seed 7 \
-    --max-tokens 8000 \
-    --save-interval-updates 10000 \
-    --max-update 300000
-```
-
-## Translate
-
-Once a model is trained, we can generate translations using an `iterative_refinement_generator`, which starts from the model's initial output and iteratively reads and greedily refines the translation until (1) the model predicts the same translation for two consecutive iterations, or (2) the generator reaches the maximum number of iterations (`--iter-decode-max-iter`). Use `--print-step` to check the actual number of iterations for each sentence.
-
-For the *Levenshtein Transformer*, it sometimes helps to apply an `--iter-decode-eos-penalty` (typically 0~3) to penalize the model for finishing generation too early and producing translations that are too short.
-
-For example, to generate with `--iter-decode-max-iter=9`:
-```bash
-fairseq-generate \
-    data-bin/wmt14_en_de_distill \
-    --gen-subset test \
-    --task translation_lev \
-    --path checkpoints/checkpoint_best.pt \
-    --iter-decode-max-iter 9 \
-    --iter-decode-eos-penalty 0 \
-    --beam 1 --remove-bpe \
-    --print-step \
-    --batch-size 400
-```
-At the end of generation, the tokenized BLEU score for the translations is reported.
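-
-The stopping rule described above can be captured in a few lines. The snippet below is only a minimal sketch, not the actual `iterative_refinement_generator`: `refine_step` is a hypothetical callable standing in for one decoding pass of a trained NAT model, and `max_iter` plays the role of `--iter-decode-max-iter`.
-
-```python
-import torch
-
-
-def iterative_refine(refine_step, initial_tokens, max_iter=9):
-    """Greedily refine a hypothesis until it stops changing or max_iter is reached."""
-    prev = initial_tokens
-    for step in range(1, max_iter + 1):
-        cur = refine_step(prev)     # read the current hypothesis and refine it
-        if torch.equal(cur, prev):  # (1) two consecutive iterations agree: stop early
-            return cur, step
-        prev = cur
-    return prev, max_iter           # (2) maximum number of iterations reached
-```
-The real generator additionally handles length prediction, batching and the EOS penalty discussed above, which this sketch omits.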
- -## Advanced Decoding Methods -### Ensemble -The NAT models use special implementations of [ensembling](https://github.com/fairinternal/fairseq-py/blob/b98d88da52f2f21f1b169bab8c70c1c4ca19a768/fairseq/sequence_generator.py#L522) to support iterative refinement and a variety of parallel operations in different models, while it shares the same API as standard autoregressive models as follows: -```bash -fairseq-generate \ - data-bin/wmt14_en_de_distill \ - --gen-subset test \ - --task translation_lev \ - --path checkpoint_1.pt:checkpoint_2.pt:checkpoint_3.pt \ - --iter-decode-max-iter 9 \ - --iter-decode-eos-penalty 0 \ - --beam 1 --remove-bpe \ - --print-step \ - --batch-size 400 -``` -We use ``:`` to split multiple models. Note that, not all NAT models support ensembling for now. - - -### Length-beam -For models that predict lengths before decoding (e.g. the vanilla NAT, Mask-Predict, etc), it is possible to improve the translation quality by varying the target lengths around the predicted value, and translating the same example multiple times in parallel. We can select the best translation with the highest scores defined by your model's output. - -Note that, not all models support length beams. For models which dynamically change the lengths (e.g. *Insertion Transformer*, *Levenshtein Transformer*), the same trick does not apply. - -### Re-ranking -If the model generates multiple translations with length beam, we can also introduce an autoregressive model to rerank the translations considering scoring from an autoregressive model is much faster than decoding from that. - -For example, to generate translations with length beam and reranking, -```bash -fairseq-generate \ - data-bin/wmt14_en_de_distill \ - --gen-subset test \ - --task translation_lev \ - --path checkpoints/checkpoint_best.pt:at_checkpoints/checkpoint_best.pt \ - --iter-decode-max-iter 9 \ - --iter-decode-eos-penalty 0 \ - --iter-decode-with-beam 9 \ - --iter-decode-with-external-reranker \ - --beam 1 --remove-bpe \ - --print-step \ - --batch-size 100 -``` -Note that we need to make sure the autoregressive model shares the same vocabulary as our target non-autoregressive model. - - -## Citation - -```bibtex -@incollection{NIPS2019_9297, - title = {Levenshtein Transformer}, - author = {Gu, Jiatao and Wang, Changhan and Zhao, Junbo}, - booktitle = {Advances in Neural Information Processing Systems 32}, - editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett}, - pages = {11179--11189}, - year = {2019}, - publisher = {Curran Associates, Inc.}, - url = {http://papers.nips.cc/paper/9297-levenshtein-transformer.pdf} -} -``` -```bibtex -@article{zhou2019understanding, - title={Understanding Knowledge Distillation in Non-autoregressive Machine Translation}, - author={Zhou, Chunting and Neubig, Graham and Gu, Jiatao}, - journal={arXiv preprint arXiv:1911.02727}, - year={2019} -} -``` diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/lstm.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/lstm.py deleted file mode 100644 index e1e66a7d50fa1b1b313e9d1a6e7862ac9bfaa074..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/lstm.py +++ /dev/null @@ -1,753 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from typing import Dict, List, Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, - register_model, - register_model_architecture, -) -from fairseq.modules import AdaptiveSoftmax, FairseqDropout -from torch import Tensor - - -DEFAULT_MAX_SOURCE_POSITIONS = 1e5 -DEFAULT_MAX_TARGET_POSITIONS = 1e5 - - -@register_model("lstm") -class LSTMModel(FairseqEncoderDecoderModel): - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--dropout', type=float, metavar='D', - help='dropout probability') - parser.add_argument('--encoder-embed-dim', type=int, metavar='N', - help='encoder embedding dimension') - parser.add_argument('--encoder-embed-path', type=str, metavar='STR', - help='path to pre-trained encoder embedding') - parser.add_argument('--encoder-freeze-embed', action='store_true', - help='freeze encoder embeddings') - parser.add_argument('--encoder-hidden-size', type=int, metavar='N', - help='encoder hidden size') - parser.add_argument('--encoder-layers', type=int, metavar='N', - help='number of encoder layers') - parser.add_argument('--encoder-bidirectional', action='store_true', - help='make all layers of encoder bidirectional') - parser.add_argument('--decoder-embed-dim', type=int, metavar='N', - help='decoder embedding dimension') - parser.add_argument('--decoder-embed-path', type=str, metavar='STR', - help='path to pre-trained decoder embedding') - parser.add_argument('--decoder-freeze-embed', action='store_true', - help='freeze decoder embeddings') - parser.add_argument('--decoder-hidden-size', type=int, metavar='N', - help='decoder hidden size') - parser.add_argument('--decoder-layers', type=int, metavar='N', - help='number of decoder layers') - parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', - help='decoder output embedding dimension') - parser.add_argument('--decoder-attention', type=str, metavar='BOOL', - help='decoder attention') - parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', - help='comma separated list of adaptive softmax cutoff points. 
' - 'Must be used with adaptive_loss criterion') - parser.add_argument('--share-decoder-input-output-embed', default=False, - action='store_true', - help='share decoder input and output embeddings') - parser.add_argument('--share-all-embeddings', default=False, action='store_true', - help='share encoder, decoder and output embeddings' - ' (requires shared dictionary and embed dim)') - - # Granular dropout settings (if not specified these default to --dropout) - parser.add_argument('--encoder-dropout-in', type=float, metavar='D', - help='dropout probability for encoder input embedding') - parser.add_argument('--encoder-dropout-out', type=float, metavar='D', - help='dropout probability for encoder output') - parser.add_argument('--decoder-dropout-in', type=float, metavar='D', - help='dropout probability for decoder input embedding') - parser.add_argument('--decoder-dropout-out', type=float, metavar='D', - help='dropout probability for decoder output') - # fmt: on - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted (in case there are any new ones) - base_architecture(args) - - if args.encoder_layers != args.decoder_layers: - raise ValueError("--encoder-layers must match --decoder-layers") - - max_source_positions = getattr( - args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS - ) - max_target_positions = getattr( - args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS - ) - - def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - embed_dict = utils.parse_embedding(embed_path) - utils.print_embed_overlap(embed_dict, dictionary) - return utils.load_embedding(embed_dict, dictionary, embed_tokens) - - if args.encoder_embed_path: - pretrained_encoder_embed = load_pretrained_embedding_from_file( - args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim - ) - else: - num_embeddings = len(task.source_dictionary) - pretrained_encoder_embed = Embedding( - num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() - ) - - if args.share_all_embeddings: - # double check all parameters combinations are valid - if task.source_dictionary != task.target_dictionary: - raise ValueError("--share-all-embeddings requires a joint dictionary") - if args.decoder_embed_path and ( - args.decoder_embed_path != args.encoder_embed_path - ): - raise ValueError( - "--share-all-embed not compatible with --decoder-embed-path" - ) - if args.encoder_embed_dim != args.decoder_embed_dim: - raise ValueError( - "--share-all-embeddings requires --encoder-embed-dim to " - "match --decoder-embed-dim" - ) - pretrained_decoder_embed = pretrained_encoder_embed - args.share_decoder_input_output_embed = True - else: - # separate decoder input embeddings - pretrained_decoder_embed = None - if args.decoder_embed_path: - pretrained_decoder_embed = load_pretrained_embedding_from_file( - args.decoder_embed_path, - task.target_dictionary, - args.decoder_embed_dim, - ) - # one last double check of parameter combinations - if args.share_decoder_input_output_embed and ( - args.decoder_embed_dim != args.decoder_out_embed_dim - ): - raise ValueError( - "--share-decoder-input-output-embeddings requires " - "--decoder-embed-dim to match --decoder-out-embed-dim" - ) - - if args.encoder_freeze_embed: - pretrained_encoder_embed.weight.requires_grad = False - if 
args.decoder_freeze_embed: - pretrained_decoder_embed.weight.requires_grad = False - - encoder = LSTMEncoder( - dictionary=task.source_dictionary, - embed_dim=args.encoder_embed_dim, - hidden_size=args.encoder_hidden_size, - num_layers=args.encoder_layers, - dropout_in=args.encoder_dropout_in, - dropout_out=args.encoder_dropout_out, - bidirectional=args.encoder_bidirectional, - pretrained_embed=pretrained_encoder_embed, - max_source_positions=max_source_positions, - ) - decoder = LSTMDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - hidden_size=args.decoder_hidden_size, - out_embed_dim=args.decoder_out_embed_dim, - num_layers=args.decoder_layers, - dropout_in=args.decoder_dropout_in, - dropout_out=args.decoder_dropout_out, - attention=utils.eval_bool(args.decoder_attention), - encoder_output_units=encoder.output_units, - pretrained_embed=pretrained_decoder_embed, - share_input_output_embed=args.share_decoder_input_output_embed, - adaptive_softmax_cutoff=( - utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) - if args.criterion == "adaptive_loss" - else None - ), - max_target_positions=max_target_positions, - residuals=False, - ) - return cls(encoder, decoder) - - def forward( - self, - src_tokens, - src_lengths, - prev_output_tokens, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) - decoder_out = self.decoder( - prev_output_tokens, - encoder_out=encoder_out, - incremental_state=incremental_state, - ) - return decoder_out - - -class LSTMEncoder(FairseqEncoder): - """LSTM encoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - bidirectional=False, - left_pad=True, - pretrained_embed=None, - padding_idx=None, - max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, - ): - super().__init__(dictionary) - self.num_layers = num_layers - self.dropout_in_module = FairseqDropout( - dropout_in*1.0, module_name=self.__class__.__name__ - ) - self.dropout_out_module = FairseqDropout( - dropout_out*1.0, module_name=self.__class__.__name__ - ) - self.bidirectional = bidirectional - self.hidden_size = hidden_size - self.max_source_positions = max_source_positions - - num_embeddings = len(dictionary) - self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.lstm = LSTM( - input_size=embed_dim, - hidden_size=hidden_size, - num_layers=num_layers, - dropout=self.dropout_out_module.p if num_layers > 1 else 0.0, - bidirectional=bidirectional, - ) - self.left_pad = left_pad - - self.output_units = hidden_size - if bidirectional: - self.output_units *= 2 - - def forward( - self, - src_tokens: Tensor, - src_lengths: Tensor, - enforce_sorted: bool = True, - ): - """ - Args: - src_tokens (LongTensor): tokens in the source language of - shape `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of - shape `(batch)` - enforce_sorted (bool, optional): if True, `src_tokens` is - expected to contain sequences sorted by length in a - decreasing order. If False, this condition is not - required. Default: True. 
- """ - if self.left_pad: - # nn.utils.rnn.pack_padded_sequence requires right-padding; - # convert left-padding to right-padding - src_tokens = utils.convert_padding_direction( - src_tokens, - torch.zeros_like(src_tokens).fill_(self.padding_idx), - left_to_right=True, - ) - - bsz, seqlen = src_tokens.size() - - # embed tokens - x = self.embed_tokens(src_tokens) - x = self.dropout_in_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # pack embedded source tokens into a PackedSequence - packed_x = nn.utils.rnn.pack_padded_sequence( - x, src_lengths.cpu(), enforce_sorted=enforce_sorted - ) - - # apply LSTM - if self.bidirectional: - state_size = 2 * self.num_layers, bsz, self.hidden_size - else: - state_size = self.num_layers, bsz, self.hidden_size - h0 = x.new_zeros(*state_size) - c0 = x.new_zeros(*state_size) - packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) - - # unpack outputs and apply dropout - x, _ = nn.utils.rnn.pad_packed_sequence( - packed_outs, padding_value=self.padding_idx * 1.0 - ) - x = self.dropout_out_module(x) - assert list(x.size()) == [seqlen, bsz, self.output_units] - - if self.bidirectional: - final_hiddens = self.combine_bidir(final_hiddens, bsz) - final_cells = self.combine_bidir(final_cells, bsz) - - encoder_padding_mask = src_tokens.eq(self.padding_idx).t() - - return tuple( - ( - x, # seq_len x batch x hidden - final_hiddens, # num_layers x batch x num_directions*hidden - final_cells, # num_layers x batch x num_directions*hidden - encoder_padding_mask, # seq_len x batch - ) - ) - - def combine_bidir(self, outs, bsz: int): - out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() - return out.view(self.num_layers, bsz, -1) - - def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order): - return tuple( - ( - encoder_out[0].index_select(1, new_order), - encoder_out[1].index_select(1, new_order), - encoder_out[2].index_select(1, new_order), - encoder_out[3].index_select(1, new_order), - ) - ) - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return self.max_source_positions - - -class AttentionLayer(nn.Module): - def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): - super().__init__() - - self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) - self.output_proj = Linear( - input_embed_dim + source_embed_dim, output_embed_dim, bias=bias - ) - - def forward(self, input, source_hids, encoder_padding_mask): - # input: bsz x input_embed_dim - # source_hids: srclen x bsz x source_embed_dim - - # x: bsz x source_embed_dim - x = self.input_proj(input) - - # compute attention - attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) - - # don't attend over padding - if encoder_padding_mask is not None: - attn_scores = ( - attn_scores.float() - .masked_fill_(encoder_padding_mask, float("-inf")) - .type_as(attn_scores) - ) # FP16 support: cast to float and back - - attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz - - # sum weighted sources - x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) - - x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) - return x, attn_scores - - -class LSTMDecoder(FairseqIncrementalDecoder): - """LSTM decoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - out_embed_dim=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - attention=True, - encoder_output_units=512, - pretrained_embed=None, - 
share_input_output_embed=False, - adaptive_softmax_cutoff=None, - max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, - residuals=False, - ): - super().__init__(dictionary) - self.dropout_in_module = FairseqDropout( - dropout_in*1.0, module_name=self.__class__.__name__ - ) - self.dropout_out_module = FairseqDropout( - dropout_out*1.0, module_name=self.__class__.__name__ - ) - self.hidden_size = hidden_size - self.share_input_output_embed = share_input_output_embed - self.need_attn = True - self.max_target_positions = max_target_positions - self.residuals = residuals - self.num_layers = num_layers - - self.adaptive_softmax = None - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.encoder_output_units = encoder_output_units - if encoder_output_units != hidden_size and encoder_output_units != 0: - self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) - self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) - else: - self.encoder_hidden_proj = self.encoder_cell_proj = None - - # disable input feeding if there is no encoder - # input feeding is described in arxiv.org/abs/1508.04025 - input_feed_size = 0 if encoder_output_units == 0 else hidden_size - self.layers = nn.ModuleList( - [ - LSTMCell( - input_size=input_feed_size + embed_dim - if layer == 0 - else hidden_size, - hidden_size=hidden_size, - ) - for layer in range(num_layers) - ] - ) - - if attention: - # TODO make bias configurable - self.attention = AttentionLayer( - hidden_size, encoder_output_units, hidden_size, bias=False - ) - else: - self.attention = None - - if hidden_size != out_embed_dim: - self.additional_fc = Linear(hidden_size, out_embed_dim) - - if adaptive_softmax_cutoff is not None: - # setting adaptive_softmax dropout to dropout_out for now but can be redefined - self.adaptive_softmax = AdaptiveSoftmax( - num_embeddings, - hidden_size, - adaptive_softmax_cutoff, - dropout=dropout_out, - ) - elif not self.share_input_output_embed: - self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) - - def forward( - self, - prev_output_tokens, - encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - src_lengths: Optional[Tensor] = None, - ): - x, attn_scores = self.extract_features( - prev_output_tokens, encoder_out, incremental_state - ) - return self.output_layer(x), attn_scores - - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - """ - Similar to *forward* but only return features. 
- """ - # get outputs from encoder - if encoder_out is not None: - encoder_outs = encoder_out[0] - encoder_hiddens = encoder_out[1] - encoder_cells = encoder_out[2] - encoder_padding_mask = encoder_out[3] - else: - encoder_outs = torch.empty(0) - encoder_hiddens = torch.empty(0) - encoder_cells = torch.empty(0) - encoder_padding_mask = torch.empty(0) - srclen = encoder_outs.size(0) - - if incremental_state is not None and len(incremental_state) > 0: - prev_output_tokens = prev_output_tokens[:, -1:] - - bsz, seqlen = prev_output_tokens.size() - - # embed tokens - x = self.embed_tokens(prev_output_tokens) - x = self.dropout_in_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # initialize previous states (or get from cache during incremental generation) - if incremental_state is not None and len(incremental_state) > 0: - prev_hiddens, prev_cells, input_feed = self.get_cached_state( - incremental_state - ) - elif encoder_out is not None: - # setup recurrent cells - prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] - prev_cells = [encoder_cells[i] for i in range(self.num_layers)] - if self.encoder_hidden_proj is not None: - prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] - prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] - input_feed = x.new_zeros(bsz, self.hidden_size) - else: - # setup zero cells, since there is no encoder - zero_state = x.new_zeros(bsz, self.hidden_size) - prev_hiddens = [zero_state for i in range(self.num_layers)] - prev_cells = [zero_state for i in range(self.num_layers)] - input_feed = None - - assert ( - srclen > 0 or self.attention is None - ), "attention is not supported if there are no encoder outputs" - attn_scores: Optional[Tensor] = ( - x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None - ) - outs = [] - for j in range(seqlen): - # input feeding: concatenate context vector from previous time step - if input_feed is not None: - input = torch.cat((x[j, :, :], input_feed), dim=1) - else: - input = x[j] - - for i, rnn in enumerate(self.layers): - # recurrent cell - hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) - - # hidden state becomes the input to the next layer - input = self.dropout_out_module(hidden) - if self.residuals: - input = input + prev_hiddens[i] - - # save state for next time step - prev_hiddens[i] = hidden - prev_cells[i] = cell - - # apply attention using the last layer's hidden state - if self.attention is not None: - assert attn_scores is not None - out, attn_scores[:, j, :] = self.attention( - hidden, encoder_outs, encoder_padding_mask - ) - else: - out = hidden - out = self.dropout_out_module(out) - - # input feeding - if input_feed is not None: - input_feed = out - - # save final output - outs.append(out) - - # Stack all the necessary tensors together and store - prev_hiddens_tensor = torch.stack(prev_hiddens) - prev_cells_tensor = torch.stack(prev_cells) - cache_state = torch.jit.annotate( - Dict[str, Optional[Tensor]], - { - "prev_hiddens": prev_hiddens_tensor, - "prev_cells": prev_cells_tensor, - "input_feed": input_feed, - }, - ) - self.set_incremental_state(incremental_state, "cached_state", cache_state) - - # collect outputs across time steps - x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) - - # T x B x C -> B x T x C - x = x.transpose(1, 0) - - if hasattr(self, "additional_fc") and self.adaptive_softmax is None: - x = self.additional_fc(x) - x = self.dropout_out_module(x) - # srclen x tgtlen x bsz -> bsz x tgtlen x srclen - 
if not self.training and self.need_attn and self.attention is not None: - assert attn_scores is not None - attn_scores = attn_scores.transpose(0, 2) - else: - attn_scores = None - return x, attn_scores - - def output_layer(self, x): - """Project features to the vocabulary size.""" - if self.adaptive_softmax is None: - if self.share_input_output_embed: - x = F.linear(x, self.embed_tokens.weight) - else: - x = self.fc_out(x) - return x - - def get_cached_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: - cached_state = self.get_incremental_state(incremental_state, "cached_state") - assert cached_state is not None - prev_hiddens_ = cached_state["prev_hiddens"] - assert prev_hiddens_ is not None - prev_cells_ = cached_state["prev_cells"] - assert prev_cells_ is not None - prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] - prev_cells = [prev_cells_[j] for j in range(self.num_layers)] - input_feed = cached_state[ - "input_feed" - ] # can be None for decoder-only language models - return prev_hiddens, prev_cells, input_feed - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - if incremental_state is None or len(incremental_state) == 0: - return - prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) - prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] - prev_cells = [p.index_select(0, new_order) for p in prev_cells] - if input_feed is not None: - input_feed = input_feed.index_select(0, new_order) - cached_state_new = torch.jit.annotate( - Dict[str, Optional[Tensor]], - { - "prev_hiddens": torch.stack(prev_hiddens), - "prev_cells": torch.stack(prev_cells), - "input_feed": input_feed, - }, - ) - self.set_incremental_state(incremental_state, "cached_state", cached_state_new), - return - - def max_positions(self): - """Maximum output length supported by the decoder.""" - return self.max_target_positions - - def make_generation_fast_(self, need_attn=False, **kwargs): - self.need_attn = need_attn - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.uniform_(m.weight, -0.1, 0.1) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def LSTM(input_size, hidden_size, **kwargs): - m = nn.LSTM(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def LSTMCell(input_size, hidden_size, **kwargs): - m = nn.LSTMCell(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def Linear(in_features, out_features, bias=True, dropout=0.0): - """Linear layer (input: N x T x C)""" - m = nn.Linear(in_features, out_features, bias=bias) - m.weight.data.uniform_(-0.1, 0.1) - if bias: - m.bias.data.uniform_(-0.1, 0.1) - return m - - -@register_model_architecture("lstm", "lstm") -def base_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False) - args.encoder_hidden_size = getattr( - args, "encoder_hidden_size", args.encoder_embed_dim - ) - args.encoder_layers = 
getattr(args, "encoder_layers", 1) - args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False) - args.decoder_hidden_size = getattr( - args, "decoder_hidden_size", args.decoder_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 1) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - args.decoder_attention = getattr(args, "decoder_attention", "1") - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,50000,200000" - ) - - -@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en") -def lstm_wiseman_iwslt_de_en(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - base_architecture(args) - - -@register_model_architecture("lstm", "lstm_luong_wmt_en_de") -def lstm_luong_wmt_en_de(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000) - args.encoder_layers = getattr(args, "encoder_layers", 4) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000) - args.decoder_layers = getattr(args, "decoder_layers", 4) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0) - base_architecture(args) diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/models/__init__.py b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/models/__init__.py deleted file mode 100644 index c593ea5f1842794bfcc952fc93c679a5f16aeb98..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .discriminative_reranking_model import DiscriminativeNMTReranker - - -__all__ = [ - "DiscriminativeNMTReranker", -] diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py deleted file mode 100644 index b41bfbe38789ba14e6a5ea938c75d761424c00ab..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. 
and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -import argparse -import glob - -import numpy as np - - -DIM = 1024 - - -def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False): - target_ids = [tid for tid in target_embs] - source_mat = np.stack(source_embs.values(), axis=0) - normalized_source_mat = source_mat / np.linalg.norm( - source_mat, axis=1, keepdims=True - ) - target_mat = np.stack(target_embs.values(), axis=0) - normalized_target_mat = target_mat / np.linalg.norm( - target_mat, axis=1, keepdims=True - ) - sim_mat = normalized_source_mat.dot(normalized_target_mat.T) - if return_sim_mat: - return sim_mat - neighbors_map = {} - for i, sentence_id in enumerate(source_embs): - idx = np.argsort(sim_mat[i, :])[::-1][:k] - neighbors_map[sentence_id] = [target_ids[tid] for tid in idx] - return neighbors_map - - -def load_embeddings(directory, LANGS): - sentence_embeddings = {} - sentence_texts = {} - for lang in LANGS: - sentence_embeddings[lang] = {} - sentence_texts[lang] = {} - lang_dir = f"{directory}/{lang}" - embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*") - for embed_file in embedding_files: - shard_id = embed_file.split(".")[-1] - embeddings = np.fromfile(embed_file, dtype=np.float32) - num_rows = embeddings.shape[0] // DIM - embeddings = embeddings.reshape((num_rows, DIM)) - - with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file: - for idx, line in enumerate(sentence_file): - sentence_id, sentence = line.strip().split("\t") - sentence_texts[lang][sentence_id] = sentence - sentence_embeddings[lang][sentence_id] = embeddings[idx, :] - - return sentence_embeddings, sentence_texts - - -def compute_accuracy(directory, LANGS): - sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS) - - top_1_accuracy = {} - - top1_str = " ".join(LANGS) + "\n" - for source_lang in LANGS: - top_1_accuracy[source_lang] = {} - top1_str += f"{source_lang} " - for target_lang in LANGS: - top1 = 0 - top5 = 0 - neighbors_map = compute_dist( - sentence_embeddings[source_lang], sentence_embeddings[target_lang] - ) - for sentence_id, neighbors in neighbors_map.items(): - if sentence_id == neighbors[0]: - top1 += 1 - if sentence_id in neighbors[:5]: - top5 += 1 - n = len(sentence_embeddings[target_lang]) - top1_str += f"{top1/n} " - top1_str += "\n" - - print(top1_str) - print(top1_str, file=open(f"{directory}/accuracy", "w")) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Analyze encoder outputs") - parser.add_argument("directory", help="Source language corpus") - parser.add_argument("--langs", help="List of langs") - args = parser.parse_args() - langs = args.langs.split(",") - compute_accuracy(args.directory, langs) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_multihead_attention.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_multihead_attention.py deleted file mode 100644 index 3cbd9d6785886e319aab0601517e27df733b6f97..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_multihead_attention.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import math - -import torch - -from .multihead_attention import MultiheadAttention - - -class SparseMultiheadAttention(MultiheadAttention): - """Sparse Multi-Headed Attention. - - "Generating Long Sequences with Sparse Transformers". Implements - fixed factorized self attention, where l=stride and c=expressivity. - A(1) includes all words in the stride window and A(2) takes a summary of c - words from the end of each stride window. - If is_bidirectional=False, we do not include any words past the current word, - as in the paper. - """ - - def __init__( - self, - embed_dim, - num_heads, - kdim=None, - vdim=None, - dropout=0.0, - bias=True, - add_bias_kv=False, - add_zero_attn=False, - self_attention=False, - encoder_decoder_attention=False, - stride=32, - expressivity=8, - is_bidirectional=True, - ): - - super().__init__( - embed_dim, - num_heads, - kdim, - vdim, - dropout, - bias, - add_bias_kv, - add_zero_attn, - self_attention, - encoder_decoder_attention, - ) - - self.is_bidirectional = is_bidirectional - self.stride = stride - self.expressivity = expressivity - assert self.stride > 0 and self.stride >= self.expressivity - - # Used for Ai(2) calculations - beginning of [l-c, l] range - def compute_checkpoint(self, word_index): - if word_index % self.stride == 0 and word_index != 0: - checkpoint_index = word_index - self.expressivity - else: - checkpoint_index = ( - math.floor(word_index / self.stride) * self.stride - + self.stride - - self.expressivity - ) - return checkpoint_index - - # Computes Ai(2) - def compute_subset_summaries(self, absolute_max): - checkpoint_index = self.compute_checkpoint(0) - subset_two = set() - while checkpoint_index <= absolute_max - 1: - summary = set( - range( - checkpoint_index, - min(checkpoint_index + self.expressivity + 1, absolute_max), - ) - ) - subset_two = subset_two.union(summary) - checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride) - return subset_two - - # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf - def compute_fixed_attention_subset(self, word_index, tgt_len): - # +1s account for range function; [min, max) -> [min, max] - if not self.is_bidirectional: - absolute_max = word_index + 1 - else: - absolute_max = tgt_len - - # Subset 1 - whole window - rounded_index = ( - math.floor((word_index + self.stride) / self.stride) * self.stride - ) - if word_index % self.stride == 0 and word_index != 0: - subset_one = set( - range(word_index - self.stride, min(absolute_max, word_index + 1)) - ) - else: - subset_one = set( - range( - max(0, rounded_index - self.stride), - min(absolute_max, rounded_index + 1), - ) - ) - - # Subset 2 - summary per window - # If bidirectional, subset 2 is the same for every index - subset_two = set() - if not self.is_bidirectional: - subset_two = self.compute_subset_summaries(absolute_max) - - return subset_one.union(subset_two) - - # Compute sparse mask - if bidirectional, can pre-compute and store - def buffered_sparse_mask(self, tensor, tgt_len, src_len): - assert tgt_len > self.stride - sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf")) - - # If bidirectional, subset 2 is the same for every index - subset_summaries = set() - if self.is_bidirectional: - subset_summaries = self.compute_subset_summaries(tgt_len) - - for i in range(tgt_len): - fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) - fixed_attention_subset = fixed_attention_subset.union(subset_summaries) - included_word_indices = 
torch.LongTensor(list(fixed_attention_subset)) - sparse_mask[i].index_fill_(0, included_word_indices, 0) - return sparse_mask.type_as(tensor) - - def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): - sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) - sparse_mask = sparse_mask.unsqueeze(0).expand( - bsz * self.num_heads, tgt_len, src_len - ) - attn_weights += sparse_mask diff --git a/spaces/Omnibus/MusicGen/tests/modules/test_rope.py b/spaces/Omnibus/MusicGen/tests/modules/test_rope.py deleted file mode 100644 index 067c6f067acbf27fb0fef5c2b812c22474c4fcd0..0000000000000000000000000000000000000000 --- a/spaces/Omnibus/MusicGen/tests/modules/test_rope.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.modules.rope import RotaryEmbedding -from audiocraft.modules.transformer import StreamingTransformer, set_efficient_attention_backend - - -def test_rope(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert list(xq_out.shape) == [B, T, H, C] - assert list(xk_out.shape) == [B, T, H, C] - - -def test_rope_io_dtypes(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32) - rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64) - - # Test bfloat16 inputs w/ both 32 and 64 precision rope. - xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16) - xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16) - xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16) - assert xq_out.dtype == torch.bfloat16 - xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16) - assert xq_out.dtype == torch.bfloat16 - - # Test float32 inputs w/ both 32 and 64 precision rope. 
- xq_32 = torch.rand((B, T, H, C)).to(torch.float32) - xk_32 = torch.rand((B, T, H, C)).to(torch.float32) - xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32) - assert xq_out.dtype == torch.float32 - xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32) - assert xq_out.dtype == torch.float32 - - -def test_transformer_with_rope(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - for pos in ['rope', 'sin_rope']: - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1, - positional_embedding=pos) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - out = tr(x) - assert list(out.shape) == list(x.shape) - - -@torch.no_grad() -def test_rope_streaming(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, causal=True, dropout=0., - custom=True, positional_embedding='rope') - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr.streaming(): - outs = [] - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr(frame)) - - out = torch.cat(outs, dim=1) - assert list(out.shape) == [3, steps, 16] - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -@torch.no_grad() -def test_rope_streaming_past_context(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - - for context in [None, 10]: - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=True, - dropout=0., positional_embedding='rope') - tr.eval() - - steps = 20 - x = torch.randn(3, steps, 16) - ref = tr(x) - - with tr.streaming(): - outs = [] - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr(frame)) - - out = torch.cat(outs, dim=1) - assert list(out.shape) == [3, steps, 16] - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_rope_memory_efficient(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1, - positional_embedding='rope') - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1, - positional_embedding='rope') - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - # Check at float precision b/c this is the rope default. 
- assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm() - - -def test_rope_with_xpos(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C, xpos=True) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert list(xq_out.shape) == [B, T, H, C] - assert list(xk_out.shape) == [B, T, H, C] - - -def test_positional_scale(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert torch.allclose(xq, xq_out) - assert torch.allclose(xk, xk_out) diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/vertices.py b/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/vertices.py deleted file mode 100644 index 78be1b12a2fec4ca43ab9065e99a0a1ba368be5a..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/vertices.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np - - -def prepare_vertices(vertices, canonicalize=True): - data = vertices - # Swap axis (gravity=Z instead of Y) - # data = data[..., [2, 0, 1]] - - # Make left/right correct - # data[..., [1]] = -data[..., [1]] - - # Center the first root to the first frame - data -= data[[0], [0], :] - - # Remove the floor - data[..., 2] -= np.min(data[..., 2]) - return data diff --git a/spaces/OptimalScale/Robin-7b/lmflow/datasets/__init__.py b/spaces/OptimalScale/Robin-7b/lmflow/datasets/__init__.py deleted file mode 100644 index a0342a0fd34525ffa7731ddbed4015bb3555651c..0000000000000000000000000000000000000000 --- a/spaces/OptimalScale/Robin-7b/lmflow/datasets/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -"""This Python code defines a class Dataset with methods for initializing, loading, -and manipulating datasets from different backends such as Hugging Face and JSON. - -The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging -Face dataset, mapping datasets, and retrieving the backend dataset and arguments. -""" -from lmflow.datasets.dataset import Dataset diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/builder.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/builder.py deleted file mode 100644 index 7567316c566bd3aca6d8f65a84b00e9e890948a7..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/builder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..runner import Sequential -from ..utils import Registry, build_from_cfg - - -def build_model_from_cfg(cfg, registry, default_args=None): - """Build a PyTorch model from config dict(s). Different from - ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built. - - Args: - cfg (dict, list[dict]): The config of modules, is is either a config - dict or a list of config dicts. If cfg is a list, a - the built modules will be wrapped with ``nn.Sequential``. - registry (:obj:`Registry`): A registry the module belongs to. - default_args (dict, optional): Default arguments to build the module. - Defaults to None. - - Returns: - nn.Module: A built nn module. 
- """ - if isinstance(cfg, list): - modules = [ - build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg - ] - return Sequential(*modules) - else: - return build_from_cfg(cfg, registry, default_args) - - -MODELS = Registry('model', build_func=build_model_from_cfg) diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/__init__.py b/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-60.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-60.go deleted file mode 100644 index 346acd81a76fd816759258e3bcf7661e4b0f7db7..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-60.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/lilylib.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/lilylib.py deleted file mode 100644 index 34de352a580dd3a96c7fbf8de3a9bab2bf10ffcb..0000000000000000000000000000000000000000 --- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/lilylib.py +++ /dev/null @@ -1,141 +0,0 @@ -# This file is part of LilyPond, the GNU music typesetter. -# -# Copyright (C) 1998--2022 Han-Wen Nienhuys -# Jan Nieuwenhuizen -# -# LilyPond is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# LilyPond is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with LilyPond. If not, see . - -import __main__ -import codecs -import gettext -import optparse -import os -import sys - -sys.stdin = codecs.getreader('utf-8')(sys.stdin.detach()) -sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach()) -sys.stderr = codecs.getwriter('utf-8')(sys.stderr.detach()) - -# Lilylib globals. 
-program_name = os.path.basename(sys.argv[0]) - -# Logging framework: We have the following output functions: -# error -# warning -# progress -# debug - -# TODO: use the standard logging module -_loglevels = {"NONE": 0, "ERROR": 1, "WARN": 2, - "BASIC": 3, "PROGRESS": 4, "INFO": 5, "DEBUG": 6} - -_loglevel = _loglevels["PROGRESS"] - - -def set_loglevel(l): - global _loglevel - newlevel = _loglevels.get(l, -1) - if newlevel >= 0: - debug_output(_("Setting loglevel to %s") % l) - _loglevel = newlevel - else: - error(_("Unknown or invalid loglevel '%s'") % l) - - -def handle_loglevel_option(option, opt_str, value, parser, *args): - if value: - set_loglevel(value) - elif args: - set_loglevel(args[0]) - - -def _is_loglevel(l): - global _loglevel - return _loglevel >= _loglevels[l] - - -def is_verbose(): - return _is_loglevel("DEBUG") - - -def _print_logmessage(level, s, fullmessage=True, newline=True): - if _is_loglevel(level): - if fullmessage: - s = program_name + ": " + s + "\n" - elif newline: - s += '\n' - sys.stderr.write(s) - sys.stderr.flush() - - -def error(s): - _print_logmessage("ERROR", _("error: %s") % s) - - -def warning(s): - _print_logmessage("WARN", _("warning: %s") % s) - - -def progress(s, fullmessage=False, newline=True): - _print_logmessage("PROGRESS", s, fullmessage, newline) - - -def debug_output(s, fullmessage=False, newline=True): - _print_logmessage("DEBUG", s, fullmessage, newline) - - -class _NonDentedHeadingFormatter (optparse.IndentedHelpFormatter): - def format_heading(self, heading): - if heading: - return heading[0].upper() + heading[1:] + ':\n' - return '' - - def format_option_strings(self, option): - sep = ' ' - if option._short_opts and option._long_opts: - sep = ',' - - metavar = '' - if option.takes_value(): - metavar = '=%s' % option.metavar or option.dest.upper() - - return "%3s%s %s%s" % (" ".join(option._short_opts), - sep, - " ".join(option._long_opts), - metavar) - - # Only use one level of indentation (even for groups and nested groups), - # since we don't indent the headings, either - def indent(self): - self.current_indent = self.indent_increment - self.level += 1 - - def dedent(self): - self.level -= 1 - if self.level <= 0: - self.current_indent = '' - self.level = 0 - - def format_usage(self, usage): - return _("Usage: %s") % usage + '\n' - - def format_description(self, description): - return description - - -def get_option_parser(*args, **kwargs): - p = optparse.OptionParser(*args, **kwargs) - p.formatter = _NonDentedHeadingFormatter() - p.formatter.set_parser(p) - return p diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py b/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py deleted file mode 100644 index be777123a886503172a95fe0719e956a147bbd68..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py +++ /dev/null @@ -1,48 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='EncHead', - in_channels=[512, 1024, 2048], - in_index=(1, 2, 3), - channels=512, - num_codes=32, - use_se_loss=True, - 
add_lateral=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_se_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/schedules/schedule_20k.py b/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/schedules/schedule_20k.py deleted file mode 100644 index bf780a1b6f6521833c6a5859675147824efa599d..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/schedules/schedule_20k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=20000) -checkpoint_config = dict(by_epoch=False, interval=2000) -evaluation = dict(interval=2000, metric='mIoU') diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/grids/musicgen/__init__.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/grids/musicgen/__init__.py deleted file mode 100644 index d3f101f5a29ff85271e44e4f27545168a8f27baa..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/grids/musicgen/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-"""MusicGen grids.""" diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/transformer/permuter.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/transformer/permuter.py deleted file mode 100644 index 0d43bb135adde38d94bf18a7e5edaa4523cd95cf..0000000000000000000000000000000000000000 --- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/transformer/permuter.py +++ /dev/null @@ -1,248 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np - - -class AbstractPermuter(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__() - def forward(self, x, reverse=False): - raise NotImplementedError - - -class Identity(AbstractPermuter): - def __init__(self): - super().__init__() - - def forward(self, x, reverse=False): - return x - - -class Subsample(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - C = 1 - indices = np.arange(H*W).reshape(C,H,W) - while min(H, W) > 1: - indices = indices.reshape(C,H//2,2,W//2,2) - indices = indices.transpose(0,2,4,1,3) - indices = indices.reshape(C*4,H//2, W//2) - H = H//2 - W = W//2 - C = C*4 - assert H == W == 1 - idx = torch.tensor(indices.ravel()) - self.register_buffer('forward_shuffle_idx', - nn.Parameter(idx, requires_grad=False)) - self.register_buffer('backward_shuffle_idx', - nn.Parameter(torch.argsort(idx), requires_grad=False)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -def mortonify(i, j): - """(i,j) index to linear morton code""" - i = np.uint64(i) - j = np.uint64(j) - - z = np.uint(0) - - for pos in range(32): - z = (z | - ((j & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos)) | - ((i & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos+1)) - ) - return z - - -class ZCurve(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)] - idx = np.argsort(reverseidx) - idx = torch.tensor(idx) - reverseidx = torch.tensor(reverseidx) - self.register_buffer('forward_shuffle_idx', - idx) - self.register_buffer('backward_shuffle_idx', - reverseidx) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class SpiralOut(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - assert H == W - size = W - indices = np.arange(size*size).reshape(size,size) - - i0 = size//2 - j0 = size//2-1 - - i = i0 - j = j0 - - idx = [indices[i0, j0]] - step_mult = 0 - for c in range(1, size//2+1): - step_mult += 1 - # steps left - for k in range(step_mult): - i = i - 1 - j = j - idx.append(indices[i, j]) - - # step down - for k in range(step_mult): - i = i - j = j + 1 - idx.append(indices[i, j]) - - step_mult += 1 - if c < size//2: - # step right - for k in range(step_mult): - i = i + 1 - j = j - idx.append(indices[i, j]) - - # step up - for k in range(step_mult): - i = i - j = j - 1 - idx.append(indices[i, j]) - else: - # end reached - for k in range(step_mult-1): - i = i + 1 - idx.append(indices[i, j]) - - assert len(idx) == size*size - idx = torch.tensor(idx) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class 
SpiralIn(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - assert H == W - size = W - indices = np.arange(size*size).reshape(size,size) - - i0 = size//2 - j0 = size//2-1 - - i = i0 - j = j0 - - idx = [indices[i0, j0]] - step_mult = 0 - for c in range(1, size//2+1): - step_mult += 1 - # steps left - for k in range(step_mult): - i = i - 1 - j = j - idx.append(indices[i, j]) - - # step down - for k in range(step_mult): - i = i - j = j + 1 - idx.append(indices[i, j]) - - step_mult += 1 - if c < size//2: - # step right - for k in range(step_mult): - i = i + 1 - j = j - idx.append(indices[i, j]) - - # step up - for k in range(step_mult): - i = i - j = j - 1 - idx.append(indices[i, j]) - else: - # end reached - for k in range(step_mult-1): - i = i + 1 - idx.append(indices[i, j]) - - assert len(idx) == size*size - idx = idx[::-1] - idx = torch.tensor(idx) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class Random(nn.Module): - def __init__(self, H, W): - super().__init__() - indices = np.random.RandomState(1).permutation(H*W) - idx = torch.tensor(indices.ravel()) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class AlternateParsing(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - indices = np.arange(W*H).reshape(H,W) - for i in range(1, H, 2): - indices[i, :] = indices[i, ::-1] - idx = indices.flatten() - assert len(idx) == H*W - idx = torch.tensor(idx) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -if __name__ == "__main__": - p0 = AlternateParsing(16, 16) - print(p0.forward_shuffle_idx) - print(p0.backward_shuffle_idx) - - x = torch.randint(0, 768, size=(11, 256)) - y = p0(x) - xre = p0(y, reverse=True) - assert torch.equal(x, xre) - - p1 = SpiralOut(2, 2) - print(p1.forward_shuffle_idx) - print(p1.backward_shuffle_idx) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/spinners.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/spinners.py deleted file mode 100644 index cf2b976f377c2656afb3d84add8d30b0fc280c03..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/spinners.py +++ /dev/null @@ -1,159 +0,0 @@ -import contextlib -import itertools -import logging -import sys -import time -from typing import IO, Generator, Optional - -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.logging import get_indentation - -logger = logging.getLogger(__name__) - - -class SpinnerInterface: - def spin(self) -> None: - raise NotImplementedError() - - def finish(self, final_status: str) -> None: - raise NotImplementedError() - - -class InteractiveSpinner(SpinnerInterface): - def __init__( - self, - message: str, - file: Optional[IO[str]] = None, - spin_chars: str = "-\\|/", - # Empirically, 8 updates/second looks nice - min_update_interval_seconds: 
float = 0.125, - ): - self._message = message - if file is None: - file = sys.stdout - self._file = file - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._finished = False - - self._spin_cycle = itertools.cycle(spin_chars) - - self._file.write(" " * get_indentation() + self._message + " ... ") - self._width = 0 - - def _write(self, status: str) -> None: - assert not self._finished - # Erase what we wrote before by backspacing to the beginning, writing - # spaces to overwrite the old text, and then backspacing again - backup = "\b" * self._width - self._file.write(backup + " " * self._width + backup) - # Now we have a blank slate to add our status - self._file.write(status) - self._width = len(status) - self._file.flush() - self._rate_limiter.reset() - - def spin(self) -> None: - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._write(next(self._spin_cycle)) - - def finish(self, final_status: str) -> None: - if self._finished: - return - self._write(final_status) - self._file.write("\n") - self._file.flush() - self._finished = True - - -# Used for dumb terminals, non-interactive installs (no tty), etc. -# We still print updates occasionally (once every 60 seconds by default) to -# act as a keep-alive for systems like Travis-CI that take lack-of-output as -# an indication that a task has frozen. -class NonInteractiveSpinner(SpinnerInterface): - def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None: - self._message = message - self._finished = False - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._update("started") - - def _update(self, status: str) -> None: - assert not self._finished - self._rate_limiter.reset() - logger.info("%s: %s", self._message, status) - - def spin(self) -> None: - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._update("still running...") - - def finish(self, final_status: str) -> None: - if self._finished: - return - self._update(f"finished with status '{final_status}'") - self._finished = True - - -class RateLimiter: - def __init__(self, min_update_interval_seconds: float) -> None: - self._min_update_interval_seconds = min_update_interval_seconds - self._last_update: float = 0 - - def ready(self) -> bool: - now = time.time() - delta = now - self._last_update - return delta >= self._min_update_interval_seconds - - def reset(self) -> None: - self._last_update = time.time() - - -@contextlib.contextmanager -def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]: - # Interactive spinner goes directly to sys.stdout rather than being routed - # through the logging system, but it acts like it has level INFO, - # i.e. it's only displayed if we're at level INFO or better. - # Non-interactive spinner goes through the logging system, so it is always - # in sync with logging configuration. - if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: - spinner: SpinnerInterface = InteractiveSpinner(message) - else: - spinner = NonInteractiveSpinner(message) - try: - with hidden_cursor(sys.stdout): - yield spinner - except KeyboardInterrupt: - spinner.finish("canceled") - raise - except Exception: - spinner.finish("error") - raise - else: - spinner.finish("done") - - -HIDE_CURSOR = "\x1b[?25l" -SHOW_CURSOR = "\x1b[?25h" - - -@contextlib.contextmanager -def hidden_cursor(file: IO[str]) -> Generator[None, None, None]: - # The Windows terminal does not support the hide/show cursor ANSI codes, - # even via colorama. 
So don't even try. - if WINDOWS: - yield - # We don't want to clutter the output with control characters if we're - # writing to a file, or if the user is running with --quiet. - # See https://github.com/pypa/pip/issues/3418 - elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: - yield - else: - file.write(HIDE_CURSOR) - try: - yield - finally: - file.write(SHOW_CURSOR) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/syntax.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/syntax.py deleted file mode 100644 index dace718c1b5fab7b90ed5d77283a9f907b78b4e9..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/syntax.py +++ /dev/null @@ -1,934 +0,0 @@ -import os.path -import platform -import re -import sys -import textwrap -from abc import ABC, abstractmethod -from typing import ( - Any, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Sequence, - Set, - Tuple, - Type, - Union, -) - -from pip._vendor.pygments.lexer import Lexer -from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename -from pip._vendor.pygments.style import Style as PygmentsStyle -from pip._vendor.pygments.styles import get_style_by_name -from pip._vendor.pygments.token import ( - Comment, - Error, - Generic, - Keyword, - Name, - Number, - Operator, - String, - Token, - Whitespace, -) -from pip._vendor.pygments.util import ClassNotFound - -from pip._vendor.rich.containers import Lines -from pip._vendor.rich.padding import Padding, PaddingDimensions - -from ._loop import loop_first -from .color import Color, blend_rgb -from .console import Console, ConsoleOptions, JustifyMethod, RenderResult -from .jupyter import JupyterMixin -from .measure import Measurement -from .segment import Segment, Segments -from .style import Style, StyleType -from .text import Text - -TokenType = Tuple[str, ...] 
- -WINDOWS = platform.system() == "Windows" -DEFAULT_THEME = "monokai" - -# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py -# A few modifications were made - -ANSI_LIGHT: Dict[TokenType, Style] = { - Token: Style(), - Whitespace: Style(color="white"), - Comment: Style(dim=True), - Comment.Preproc: Style(color="cyan"), - Keyword: Style(color="blue"), - Keyword.Type: Style(color="cyan"), - Operator.Word: Style(color="magenta"), - Name.Builtin: Style(color="cyan"), - Name.Function: Style(color="green"), - Name.Namespace: Style(color="cyan", underline=True), - Name.Class: Style(color="green", underline=True), - Name.Exception: Style(color="cyan"), - Name.Decorator: Style(color="magenta", bold=True), - Name.Variable: Style(color="red"), - Name.Constant: Style(color="red"), - Name.Attribute: Style(color="cyan"), - Name.Tag: Style(color="bright_blue"), - String: Style(color="yellow"), - Number: Style(color="blue"), - Generic.Deleted: Style(color="bright_red"), - Generic.Inserted: Style(color="green"), - Generic.Heading: Style(bold=True), - Generic.Subheading: Style(color="magenta", bold=True), - Generic.Prompt: Style(bold=True), - Generic.Error: Style(color="bright_red"), - Error: Style(color="red", underline=True), -} - -ANSI_DARK: Dict[TokenType, Style] = { - Token: Style(), - Whitespace: Style(color="bright_black"), - Comment: Style(dim=True), - Comment.Preproc: Style(color="bright_cyan"), - Keyword: Style(color="bright_blue"), - Keyword.Type: Style(color="bright_cyan"), - Operator.Word: Style(color="bright_magenta"), - Name.Builtin: Style(color="bright_cyan"), - Name.Function: Style(color="bright_green"), - Name.Namespace: Style(color="bright_cyan", underline=True), - Name.Class: Style(color="bright_green", underline=True), - Name.Exception: Style(color="bright_cyan"), - Name.Decorator: Style(color="bright_magenta", bold=True), - Name.Variable: Style(color="bright_red"), - Name.Constant: Style(color="bright_red"), - Name.Attribute: Style(color="bright_cyan"), - Name.Tag: Style(color="bright_blue"), - String: Style(color="yellow"), - Number: Style(color="bright_blue"), - Generic.Deleted: Style(color="bright_red"), - Generic.Inserted: Style(color="bright_green"), - Generic.Heading: Style(bold=True), - Generic.Subheading: Style(color="bright_magenta", bold=True), - Generic.Prompt: Style(bold=True), - Generic.Error: Style(color="bright_red"), - Error: Style(color="red", underline=True), -} - -RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK} -NUMBERS_COLUMN_DEFAULT_PADDING = 2 - - -class SyntaxTheme(ABC): - """Base class for a syntax theme.""" - - @abstractmethod - def get_style_for_token(self, token_type: TokenType) -> Style: - """Get a style for a given Pygments token.""" - raise NotImplementedError # pragma: no cover - - @abstractmethod - def get_background_style(self) -> Style: - """Get the background color.""" - raise NotImplementedError # pragma: no cover - - -class PygmentsSyntaxTheme(SyntaxTheme): - """Syntax theme that delegates to Pygments theme.""" - - def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None: - self._style_cache: Dict[TokenType, Style] = {} - if isinstance(theme, str): - try: - self._pygments_style_class = get_style_by_name(theme) - except ClassNotFound: - self._pygments_style_class = get_style_by_name("default") - else: - self._pygments_style_class = theme - - self._background_color = self._pygments_style_class.background_color - self._background_style = 
Style(bgcolor=self._background_color) - - def get_style_for_token(self, token_type: TokenType) -> Style: - """Get a style from a Pygments class.""" - try: - return self._style_cache[token_type] - except KeyError: - try: - pygments_style = self._pygments_style_class.style_for_token(token_type) - except KeyError: - style = Style.null() - else: - color = pygments_style["color"] - bgcolor = pygments_style["bgcolor"] - style = Style( - color="#" + color if color else "#000000", - bgcolor="#" + bgcolor if bgcolor else self._background_color, - bold=pygments_style["bold"], - italic=pygments_style["italic"], - underline=pygments_style["underline"], - ) - self._style_cache[token_type] = style - return style - - def get_background_style(self) -> Style: - return self._background_style - - -class ANSISyntaxTheme(SyntaxTheme): - """Syntax theme to use standard colors.""" - - def __init__(self, style_map: Dict[TokenType, Style]) -> None: - self.style_map = style_map - self._missing_style = Style.null() - self._background_style = Style.null() - self._style_cache: Dict[TokenType, Style] = {} - - def get_style_for_token(self, token_type: TokenType) -> Style: - """Look up style in the style map.""" - try: - return self._style_cache[token_type] - except KeyError: - # Styles form a hierarchy - # We need to go from most to least specific - # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",) - get_style = self.style_map.get - token = tuple(token_type) - style = self._missing_style - while token: - _style = get_style(token) - if _style is not None: - style = _style - break - token = token[:-1] - self._style_cache[token_type] = style - return style - - def get_background_style(self) -> Style: - return self._background_style - - -SyntaxPosition = Tuple[int, int] - - -class _SyntaxHighlightRange(NamedTuple): - """ - A range to highlight in a Syntax object. - `start` and `end` are 2-integers tuples, where the first integer is the line number - (starting from 1) and the second integer is the column index (starting from 0). - """ - - style: StyleType - start: SyntaxPosition - end: SyntaxPosition - - -class Syntax(JupyterMixin): - """Construct a Syntax object to render syntax highlighted code. - - Args: - code (str): Code to highlight. - lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/) - theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai". - dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False. - line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. - start_line (int, optional): Starting number for line numbers. Defaults to 1. - line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render. - A value of None in the tuple indicates the range is open in that direction. - highlight_lines (Set[int]): A set of line numbers to highlight. - code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. - tab_size (int, optional): Size of tabs. Defaults to 4. - word_wrap (bool, optional): Enable word wrapping. - background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. - indent_guides (bool, optional): Show indent guides. Defaults to False. - padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). 
- """ - - _pygments_style_class: Type[PygmentsStyle] - _theme: SyntaxTheme - - @classmethod - def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme: - """Get a syntax theme instance.""" - if isinstance(name, SyntaxTheme): - return name - theme: SyntaxTheme - if name in RICH_SYNTAX_THEMES: - theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name]) - else: - theme = PygmentsSyntaxTheme(name) - return theme - - def __init__( - self, - code: str, - lexer: Union[Lexer, str], - *, - theme: Union[str, SyntaxTheme] = DEFAULT_THEME, - dedent: bool = False, - line_numbers: bool = False, - start_line: int = 1, - line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, - highlight_lines: Optional[Set[int]] = None, - code_width: Optional[int] = None, - tab_size: int = 4, - word_wrap: bool = False, - background_color: Optional[str] = None, - indent_guides: bool = False, - padding: PaddingDimensions = 0, - ) -> None: - self.code = code - self._lexer = lexer - self.dedent = dedent - self.line_numbers = line_numbers - self.start_line = start_line - self.line_range = line_range - self.highlight_lines = highlight_lines or set() - self.code_width = code_width - self.tab_size = tab_size - self.word_wrap = word_wrap - self.background_color = background_color - self.background_style = ( - Style(bgcolor=background_color) if background_color else Style() - ) - self.indent_guides = indent_guides - self.padding = padding - - self._theme = self.get_theme(theme) - self._stylized_ranges: List[_SyntaxHighlightRange] = [] - - @classmethod - def from_path( - cls, - path: str, - encoding: str = "utf-8", - lexer: Optional[Union[Lexer, str]] = None, - theme: Union[str, SyntaxTheme] = DEFAULT_THEME, - dedent: bool = False, - line_numbers: bool = False, - line_range: Optional[Tuple[int, int]] = None, - start_line: int = 1, - highlight_lines: Optional[Set[int]] = None, - code_width: Optional[int] = None, - tab_size: int = 4, - word_wrap: bool = False, - background_color: Optional[str] = None, - indent_guides: bool = False, - padding: PaddingDimensions = 0, - ) -> "Syntax": - """Construct a Syntax object from a file. - - Args: - path (str): Path to file to highlight. - encoding (str): Encoding of file. - lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content. - theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs". - dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True. - line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. - start_line (int, optional): Starting number for line numbers. Defaults to 1. - line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. - highlight_lines (Set[int]): A set of line numbers to highlight. - code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. - tab_size (int, optional): Size of tabs. Defaults to 4. - word_wrap (bool, optional): Enable word wrapping of code. - background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. - indent_guides (bool, optional): Show indent guides. Defaults to False. - padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). 
- - Returns: - [Syntax]: A Syntax object that may be printed to the console - """ - with open(path, "rt", encoding=encoding) as code_file: - code = code_file.read() - - if not lexer: - lexer = cls.guess_lexer(path, code=code) - - return cls( - code, - lexer, - theme=theme, - dedent=dedent, - line_numbers=line_numbers, - line_range=line_range, - start_line=start_line, - highlight_lines=highlight_lines, - code_width=code_width, - tab_size=tab_size, - word_wrap=word_wrap, - background_color=background_color, - indent_guides=indent_guides, - padding=padding, - ) - - @classmethod - def guess_lexer(cls, path: str, code: Optional[str] = None) -> str: - """Guess the alias of the Pygments lexer to use based on a path and an optional string of code. - If code is supplied, it will use a combination of the code and the filename to determine the - best lexer to use. For example, if the file is ``index.html`` and the file contains Django - templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no - templating language is used, the "html" lexer will be used. If no string of code - is supplied, the lexer will be chosen based on the file extension.. - - Args: - path (AnyStr): The path to the file containing the code you wish to know the lexer for. - code (str, optional): Optional string of code that will be used as a fallback if no lexer - is found for the supplied path. - - Returns: - str: The name of the Pygments lexer that best matches the supplied path/code. - """ - lexer: Optional[Lexer] = None - lexer_name = "default" - if code: - try: - lexer = guess_lexer_for_filename(path, code) - except ClassNotFound: - pass - - if not lexer: - try: - _, ext = os.path.splitext(path) - if ext: - extension = ext.lstrip(".").lower() - lexer = get_lexer_by_name(extension) - except ClassNotFound: - pass - - if lexer: - if lexer.aliases: - lexer_name = lexer.aliases[0] - else: - lexer_name = lexer.name - - return lexer_name - - def _get_base_style(self) -> Style: - """Get the base style.""" - default_style = self._theme.get_background_style() + self.background_style - return default_style - - def _get_token_color(self, token_type: TokenType) -> Optional[Color]: - """Get a color (if any) for the given token. - - Args: - token_type (TokenType): A token type tuple from Pygments. - - Returns: - Optional[Color]: Color from theme, or None for no color. - """ - style = self._theme.get_style_for_token(token_type) - return style.color - - @property - def lexer(self) -> Optional[Lexer]: - """The lexer for this syntax, or None if no lexer was found. - - Tries to find the lexer by name if a string was passed to the constructor. - """ - - if isinstance(self._lexer, Lexer): - return self._lexer - try: - return get_lexer_by_name( - self._lexer, - stripnl=False, - ensurenl=True, - tabsize=self.tab_size, - ) - except ClassNotFound: - return None - - def highlight( - self, - code: str, - line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, - ) -> Text: - """Highlight code and return a Text instance. - - Args: - code (str): Code to highlight. - line_range(Tuple[int, int], optional): Optional line range to highlight. - - Returns: - Text: A text instance containing highlighted syntax. 
- """ - - base_style = self._get_base_style() - justify: JustifyMethod = ( - "default" if base_style.transparent_background else "left" - ) - - text = Text( - justify=justify, - style=base_style, - tab_size=self.tab_size, - no_wrap=not self.word_wrap, - ) - _get_theme_style = self._theme.get_style_for_token - - lexer = self.lexer - - if lexer is None: - text.append(code) - else: - if line_range: - # More complicated path to only stylize a portion of the code - # This speeds up further operations as there are less spans to process - line_start, line_end = line_range - - def line_tokenize() -> Iterable[Tuple[Any, str]]: - """Split tokens to one per line.""" - assert lexer # required to make MyPy happy - we know lexer is not None at this point - - for token_type, token in lexer.get_tokens(code): - while token: - line_token, new_line, token = token.partition("\n") - yield token_type, line_token + new_line - - def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: - """Convert tokens to spans.""" - tokens = iter(line_tokenize()) - line_no = 0 - _line_start = line_start - 1 if line_start else 0 - - # Skip over tokens until line start - while line_no < _line_start: - _token_type, token = next(tokens) - yield (token, None) - if token.endswith("\n"): - line_no += 1 - # Generate spans until line end - for token_type, token in tokens: - yield (token, _get_theme_style(token_type)) - if token.endswith("\n"): - line_no += 1 - if line_end and line_no >= line_end: - break - - text.append_tokens(tokens_to_spans()) - - else: - text.append_tokens( - (token, _get_theme_style(token_type)) - for token_type, token in lexer.get_tokens(code) - ) - if self.background_color is not None: - text.stylize(f"on {self.background_color}") - - if self._stylized_ranges: - self._apply_stylized_ranges(text) - - return text - - def stylize_range( - self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition - ) -> None: - """ - Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered. - Line numbers are 1-based, while column indexes are 0-based. - - Args: - style (StyleType): The style to apply. - start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`. - end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`. 
- """ - self._stylized_ranges.append(_SyntaxHighlightRange(style, start, end)) - - def _get_line_numbers_color(self, blend: float = 0.3) -> Color: - background_style = self._theme.get_background_style() + self.background_style - background_color = background_style.bgcolor - if background_color is None or background_color.is_system_defined: - return Color.default() - foreground_color = self._get_token_color(Token.Text) - if foreground_color is None or foreground_color.is_system_defined: - return foreground_color or Color.default() - new_color = blend_rgb( - background_color.get_truecolor(), - foreground_color.get_truecolor(), - cross_fade=blend, - ) - return Color.from_triplet(new_color) - - @property - def _numbers_column_width(self) -> int: - """Get the number of characters used to render the numbers column.""" - column_width = 0 - if self.line_numbers: - column_width = ( - len(str(self.start_line + self.code.count("\n"))) - + NUMBERS_COLUMN_DEFAULT_PADDING - ) - return column_width - - def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: - """Get background, number, and highlight styles for line numbers.""" - background_style = self._get_base_style() - if background_style.transparent_background: - return Style.null(), Style(dim=True), Style.null() - if console.color_system in ("256", "truecolor"): - number_style = Style.chain( - background_style, - self._theme.get_style_for_token(Token.Text), - Style(color=self._get_line_numbers_color()), - self.background_style, - ) - highlight_number_style = Style.chain( - background_style, - self._theme.get_style_for_token(Token.Text), - Style(bold=True, color=self._get_line_numbers_color(0.9)), - self.background_style, - ) - else: - number_style = background_style + Style(dim=True) - highlight_number_style = background_style + Style(dim=False) - return background_style, number_style, highlight_number_style - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - _, right, _, left = Padding.unpack(self.padding) - if self.code_width is not None: - width = self.code_width + self._numbers_column_width + right + left - return Measurement(self._numbers_column_width, width) - return Measurement(self._numbers_column_width, options.max_width) - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - segments = Segments(self._get_syntax(console, options)) - if self.padding: - yield Padding( - segments, style=self._theme.get_background_style(), pad=self.padding - ) - else: - yield segments - - def _get_syntax( - self, - console: Console, - options: ConsoleOptions, - ) -> Iterable[Segment]: - """ - Get the Segments for the Syntax object, excluding any vertical/horizontal padding - """ - transparent_background = self._get_base_style().transparent_background - code_width = ( - ( - (options.max_width - self._numbers_column_width - 1) - if self.line_numbers - else options.max_width - ) - if self.code_width is None - else self.code_width - ) - - ends_on_nl, processed_code = self._process_code(self.code) - text = self.highlight(processed_code, self.line_range) - - if not self.line_numbers and not self.word_wrap and not self.line_range: - if not ends_on_nl: - text.remove_suffix("\n") - # Simple case of just rendering text - style = ( - self._get_base_style() - + self._theme.get_style_for_token(Comment) - + Style(dim=True) - + self.background_style - ) - if self.indent_guides and not options.ascii_only: - text = text.with_indent_guides(self.tab_size, 
style=style) - text.overflow = "crop" - if style.transparent_background: - yield from console.render( - text, options=options.update(width=code_width) - ) - else: - syntax_lines = console.render_lines( - text, - options.update(width=code_width, height=None, justify="left"), - style=self.background_style, - pad=True, - new_lines=True, - ) - for syntax_line in syntax_lines: - yield from syntax_line - return - - start_line, end_line = self.line_range or (None, None) - line_offset = 0 - if start_line: - line_offset = max(0, start_line - 1) - lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl) - if self.line_range: - lines = lines[line_offset:end_line] - - if self.indent_guides and not options.ascii_only: - style = ( - self._get_base_style() - + self._theme.get_style_for_token(Comment) - + Style(dim=True) - + self.background_style - ) - lines = ( - Text("\n") - .join(lines) - .with_indent_guides(self.tab_size, style=style) - .split("\n", allow_blank=True) - ) - - numbers_column_width = self._numbers_column_width - render_options = options.update(width=code_width) - - highlight_line = self.highlight_lines.__contains__ - _Segment = Segment - new_line = _Segment("\n") - - line_pointer = "> " if options.legacy_windows else "❱ " - - ( - background_style, - number_style, - highlight_number_style, - ) = self._get_number_styles(console) - - for line_no, line in enumerate(lines, self.start_line + line_offset): - if self.word_wrap: - wrapped_lines = console.render_lines( - line, - render_options.update(height=None, justify="left"), - style=background_style, - pad=not transparent_background, - ) - else: - segments = list(line.render(console, end="")) - if options.no_wrap: - wrapped_lines = [segments] - else: - wrapped_lines = [ - _Segment.adjust_line_length( - segments, - render_options.max_width, - style=background_style, - pad=not transparent_background, - ) - ] - - if self.line_numbers: - wrapped_line_left_pad = _Segment( - " " * numbers_column_width + " ", background_style - ) - for first, wrapped_line in loop_first(wrapped_lines): - if first: - line_column = str(line_no).rjust(numbers_column_width - 2) + " " - if highlight_line(line_no): - yield _Segment(line_pointer, Style(color="red")) - yield _Segment(line_column, highlight_number_style) - else: - yield _Segment(" ", highlight_number_style) - yield _Segment(line_column, number_style) - else: - yield wrapped_line_left_pad - yield from wrapped_line - yield new_line - else: - for wrapped_line in wrapped_lines: - yield from wrapped_line - yield new_line - - def _apply_stylized_ranges(self, text: Text) -> None: - """ - Apply stylized ranges to a text instance, - using the given code to determine the right portion to apply the style to. - - Args: - text (Text): Text instance to apply the style to. - """ - code = text.plain - newlines_offsets = [ - # Let's add outer boundaries at each side of the list: - 0, - # N.B. 
using "\n" here is much faster than using metacharacters such as "^" or "\Z": - *[ - match.start() + 1 - for match in re.finditer("\n", code, flags=re.MULTILINE) - ], - len(code) + 1, - ] - - for stylized_range in self._stylized_ranges: - start = _get_code_index_for_syntax_position( - newlines_offsets, stylized_range.start - ) - end = _get_code_index_for_syntax_position( - newlines_offsets, stylized_range.end - ) - if start is not None and end is not None: - text.stylize(stylized_range.style, start, end) - - def _process_code(self, code: str) -> Tuple[bool, str]: - """ - Applies various processing to a raw code string - (normalises it so it always ends with a line return, dedents it if necessary, etc.) - - Args: - code (str): The raw code string to process - - Returns: - Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return, - while the string is the processed code. - """ - ends_on_nl = code.endswith("\n") - processed_code = code if ends_on_nl else code + "\n" - processed_code = ( - textwrap.dedent(processed_code) if self.dedent else processed_code - ) - processed_code = processed_code.expandtabs(self.tab_size) - return ends_on_nl, processed_code - - -def _get_code_index_for_syntax_position( - newlines_offsets: Sequence[int], position: SyntaxPosition -) -> Optional[int]: - """ - Returns the index of the code string for the given positions. - - Args: - newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet. - position (SyntaxPosition): The position to search for. - - Returns: - Optional[int]: The index of the code string for this position, or `None` - if the given position's line number is out of range (if it's the column that is out of range - we silently clamp its value so that it reaches the end of the line) - """ - lines_count = len(newlines_offsets) - - line_number, column_index = position - if line_number > lines_count or len(newlines_offsets) < (line_number + 1): - return None # `line_number` is out of range - line_index = line_number - 1 - line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1 - # If `column_index` is out of range: let's silently clamp it: - column_index = min(line_length, column_index) - return newlines_offsets[line_index] + column_index - - -if __name__ == "__main__": # pragma: no cover - - import argparse - import sys - - parser = argparse.ArgumentParser( - description="Render syntax to the console with Rich" - ) - parser.add_argument( - "path", - metavar="PATH", - help="path to file, or - for stdin", - ) - parser.add_argument( - "-c", - "--force-color", - dest="force_color", - action="store_true", - default=None, - help="force color for non-terminals", - ) - parser.add_argument( - "-i", - "--indent-guides", - dest="indent_guides", - action="store_true", - default=False, - help="display indent guides", - ) - parser.add_argument( - "-l", - "--line-numbers", - dest="line_numbers", - action="store_true", - help="render line numbers", - ) - parser.add_argument( - "-w", - "--width", - type=int, - dest="width", - default=None, - help="width of output (default will auto-detect)", - ) - parser.add_argument( - "-r", - "--wrap", - dest="word_wrap", - action="store_true", - default=False, - help="word wrap long lines", - ) - parser.add_argument( - "-s", - "--soft-wrap", - action="store_true", - dest="soft_wrap", - default=False, - help="enable soft wrapping mode", - ) - parser.add_argument( - "-t", "--theme", dest="theme", default="monokai", help="pygments theme" - ) - 
parser.add_argument( - "-b", - "--background-color", - dest="background_color", - default=None, - help="Override background color", - ) - parser.add_argument( - "-x", - "--lexer", - default=None, - dest="lexer_name", - help="Lexer name", - ) - parser.add_argument( - "-p", "--padding", type=int, default=0, dest="padding", help="Padding" - ) - parser.add_argument( - "--highlight-line", - type=int, - default=None, - dest="highlight_line", - help="The line number (not index!) to highlight", - ) - args = parser.parse_args() - - from pip._vendor.rich.console import Console - - console = Console(force_terminal=args.force_color, width=args.width) - - if args.path == "-": - code = sys.stdin.read() - syntax = Syntax( - code=code, - lexer=args.lexer_name, - line_numbers=args.line_numbers, - word_wrap=args.word_wrap, - theme=args.theme, - background_color=args.background_color, - indent_guides=args.indent_guides, - padding=args.padding, - highlight_lines={args.highlight_line}, - ) - else: - syntax = Syntax.from_path( - args.path, - lexer=args.lexer_name, - line_numbers=args.line_numbers, - word_wrap=args.word_wrap, - theme=args.theme, - background_color=args.background_color, - indent_guides=args.indent_guides, - padding=args.padding, - highlight_lines={args.highlight_line}, - ) - console.print(syntax, soft_wrap=args.soft_wrap) diff --git a/spaces/Realcat/image-matching-webui/third_party/Roma/README.md b/spaces/Realcat/image-matching-webui/third_party/Roma/README.md deleted file mode 100644 index 5e984366c8f7af37615d7666f34cd82a90073fee..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/Roma/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# RoMa: Revisiting Robust Losses for Dense Feature Matching -### [Project Page (TODO)](https://parskatt.github.io/RoMa) | [Paper](https://arxiv.org/abs/2305.15404) -
- -> RoMa: Revisiting Robust Lossses for Dense Feature Matching -> [Johan Edstedt](https://scholar.google.com/citations?user=Ul-vMR0AAAAJ), [Qiyu Sun](https://scholar.google.com/citations?user=HS2WuHkAAAAJ), [Georg Bökman](https://scholar.google.com/citations?user=FUE3Wd0AAAAJ), [Mårten Wadenbäck](https://scholar.google.com/citations?user=6WRQpCQAAAAJ), [Michael Felsberg](https://scholar.google.com/citations?&user=lkWfR08AAAAJ) -> Arxiv 2023 - -**NOTE!!! Very early code, there might be bugs** - -The codebase is in the [roma folder](roma). - -## Setup/Install -In your python environment (tested on Linux python 3.10), run: -```bash -pip install -e . -``` -## Demo / How to Use -We provide two demos in the [demos folder](demo). -Here's the gist of it: -```python -from roma import roma_outdoor -roma_model = roma_outdoor(device=device) -# Match -warp, certainty = roma_model.match(imA_path, imB_path, device=device) -# Sample matches for estimation -matches, certainty = roma_model.sample(warp, certainty) -# Convert to pixel coordinates (RoMa produces matches in [-1,1]x[-1,1]) -kptsA, kptsB = roma_model.to_pixel_coordinates(matches, H_A, W_A, H_B, W_B) -# Find a fundamental matrix (or anything else of interest) -F, mask = cv2.findFundamentalMat( - kptsA.cpu().numpy(), kptsB.cpu().numpy(), ransacReprojThreshold=0.2, method=cv2.USAC_MAGSAC, confidence=0.999999, maxIters=10000 -) -``` -## Reproducing Results -The experiments in the paper are provided in the [experiments folder](experiments). - -### Training -1. First follow the instructions provided here: https://github.com/Parskatt/DKM for downloading and preprocessing datasets. -2. Run the relevant experiment, e.g., -```bash -torchrun --nproc_per_node=4 --nnodes=1 --rdzv_backend=c10d experiments/roma_outdoor.py -``` -### Testing -```bash -python experiments/roma_outdoor.py --only_test --benchmark mega-1500 -``` -## License -Due to our dependency on [DINOv2](https://github.com/facebookresearch/dinov2/blob/main/LICENSE), the license is sadly non-commercial only for the moment. - -## Acknowledgement -Our codebase builds on the code in [DKM](https://github.com/Parskatt/DKM). - -## BibTeX -If you find our models useful, please consider citing our paper! 
-``` -@article{edstedt2023roma, -title={{RoMa}: Revisiting Robust Lossses for Dense Feature Matching}, -author={Edstedt, Johan and Sun, Qiyu and Bökman, Georg and Wadenbäck, Mårten and Felsberg, Michael}, -journal={arXiv preprint arXiv:2305.15404}, -year={2023} -} -``` diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/htc.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/htc.py deleted file mode 100644 index d9efdf420fa7373f7f1d116f8d97836d73b457bf..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/htc.py +++ /dev/null @@ -1,15 +0,0 @@ -from ..builder import DETECTORS -from .cascade_rcnn import CascadeRCNN - - -@DETECTORS.register_module() -class HybridTaskCascade(CascadeRCNN): - """Implementation of `HTC `_""" - - def __init__(self, **kwargs): - super(HybridTaskCascade, self).__init__(**kwargs) - - @property - def with_semantic(self): - """bool: whether the detector has a semantic head""" - return self.roi_head.with_semantic diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py deleted file mode 100644 index b5c343258b079a0dd832d4f999c18d002b06efac..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import constant_init, kaiming_init -from mmcv.runner import auto_fp16, load_checkpoint - -from mmdet.models.backbones import ResNet -from mmdet.models.builder import SHARED_HEADS -from mmdet.models.utils import ResLayer as _ResLayer -from mmdet.utils import get_root_logger - - -@SHARED_HEADS.register_module() -class ResLayer(nn.Module): - - def __init__(self, - depth, - stage=3, - stride=2, - dilation=1, - style='pytorch', - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - with_cp=False, - dcn=None): - super(ResLayer, self).__init__() - self.norm_eval = norm_eval - self.norm_cfg = norm_cfg - self.stage = stage - self.fp16_enabled = False - block, stage_blocks = ResNet.arch_settings[depth] - stage_block = stage_blocks[stage] - planes = 64 * 2**stage - inplanes = 64 * 2**(stage - 1) * block.expansion - - res_layer = _ResLayer( - block, - inplanes, - planes, - stage_block, - stride=stride, - dilation=dilation, - style=style, - with_cp=with_cp, - norm_cfg=self.norm_cfg, - dcn=dcn) - self.add_module(f'layer{stage + 1}', res_layer) - - def init_weights(self, pretrained=None): - """Initialize the weights in the module. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.BatchNorm2d): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - @auto_fp16() - def forward(self, x): - res_layer = getattr(self, f'layer{self.stage + 1}') - out = res_layer(x) - return out - - def train(self, mode=True): - super(ResLayer, self).train(mode) - if self.norm_eval: - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/cityscapes_769x769.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/cityscapes_769x769.py deleted file mode 100644 index 336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/cityscapes_769x769.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/SI2252/README/README.md b/spaces/SI2252/README/README.md deleted file mode 100644 index f93ec890895a291d707a5fca167b63ce319322b1..0000000000000000000000000000000000000000 --- a/spaces/SI2252/README/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: README -emoji: 🦀 -colorFrom: yellow -colorTo: blue -sdk: static -pinned: false ---- - -Edit this `README.md` markdown file to author your organization card 🔥 diff --git a/spaces/SIH/building-segmentation/README.md b/spaces/SIH/building-segmentation/README.md deleted file mode 100644 index 814b0337feba93a57c661ee6b8ad91d9b56c2b4a..0000000000000000000000000000000000000000 --- a/spaces/SIH/building-segmentation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Building Segmentation -emoji: 🏆 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.43.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Sapphire-356/Video2MC/data/prepare_data_2d_h36m_sh.py b/spaces/Sapphire-356/Video2MC/data/prepare_data_2d_h36m_sh.py deleted file mode 100644 index a0fa4ea3d6aa3a7489e2a724212a40ab1cd2b3ba..0000000000000000000000000000000000000000 --- 
a/spaces/Sapphire-356/Video2MC/data/prepare_data_2d_h36m_sh.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) 2018-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# - -import argparse -import os -import sys -import tarfile -import zipfile -from glob import glob -from shutil import rmtree - -import h5py -import numpy as np - -sys.path.append('../') - -output_filename_pt = 'data_2d_h36m_sh_pt_mpii' -output_filename_ft = 'data_2d_h36m_sh_ft_h36m' -subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11'] -cam_map = { - '54138969': 0, - '55011271': 1, - '58860488': 2, - '60457274': 3, -} - -metadata = { - 'num_joints': 16, - 'keypoints_symmetry': [ - [3, 4, 5, 13, 14, 15], - [0, 1, 2, 10, 11, 12], - ] -} - - -def process_subject(subject, file_list, output): - if subject == 'S11': - assert len(file_list) == 119, "Expected 119 files for subject " + subject + ", got " + str(len(file_list)) - else: - assert len(file_list) == 120, "Expected 120 files for subject " + subject + ", got " + str(len(file_list)) - - for f in file_list: - action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.') - - if subject == 'S11' and action == 'Directions': - continue # Discard corrupted video - - if action not in output[subject]: - output[subject][action] = [None, None, None, None] - - with h5py.File(f) as hf: - positions = hf['poses'].value - output[subject][action][cam_map[cam]] = positions.astype('float32') - - -if __name__ == '__main__': - if os.path.basename(os.getcwd()) != 'data': - print('This script must be launched from the "data" directory') - exit(0) - - parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter') - - parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset') - parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset') - - args = parser.parse_args() - - if args.pretrained: - print('Converting pretrained dataset from', args.pretrained) - print('Extracting...') - with zipfile.ZipFile(args.pretrained, 'r') as archive: - archive.extractall('sh_pt') - - print('Converting...') - output = {} - for subject in subjects: - output[subject] = {} - file_list = glob('sh_pt/h36m/' + subject + '/StackedHourglass/*.h5') - process_subject(subject, file_list, output) - - print('Saving...') - np.savez_compressed(output_filename_pt, positions_2d=output, metadata=metadata) - - print('Cleaning up...') - rmtree('sh_pt') - - print('Done.') - - if args.fine_tuned: - print('Converting fine-tuned dataset from', args.fine_tuned) - print('Extracting...') - with tarfile.open(args.fine_tuned, 'r:gz') as archive: - archive.extractall('sh_ft') - - print('Converting...') - output = {} - for subject in subjects: - output[subject] = {} - file_list = glob('sh_ft/' + subject + '/StackedHourglassFineTuned240/*.h5') - process_subject(subject, file_list, output) - - print('Saving...') - np.savez_compressed(output_filename_ft, positions_2d=output, metadata=metadata) - - print('Cleaning up...') - rmtree('sh_ft') - - print('Done.') diff --git a/spaces/Sasidhar/information-extraction-demo/app.py b/spaces/Sasidhar/information-extraction-demo/app.py deleted file mode 100644 index b8e125c18f5a017fb63ed265114bce5652b9094d..0000000000000000000000000000000000000000 --- a/spaces/Sasidhar/information-extraction-demo/app.py +++ /dev/null @@ -1,253 +0,0 @@ 
-import streamlit as st -import time -import base64 -from annotated_text import annotated_text -from io import StringIO -from transformers import AutoTokenizer, AutoModelForTokenClassification -from text_extractor import * -from text_annotatator import * -from claim_details import * -import os -from streamlit_text_annotation import text_annotation - -os.environ['KMP_DUPLICATE_LIB_OK']='True' - -import plotly.express as px -from streamlit_option_menu import option_menu - -from transformers import pipeline -import pandas as pd - -st.set_page_config(layout="wide") - -@st.cache(allow_output_mutation = True) -def init_text_summarization_model(): - MODEL = 'facebook/bart-large-cnn' - pipe = pipeline("summarization", model=MODEL) - return pipe - -@st.cache(allow_output_mutation = True) -def init_zsl_topic_classification(): - MODEL = 'facebook/bart-large-mnli' - pipe = pipeline("zero-shot-classification", model=MODEL) - template = "This text is about {}." - return pipe, template - -@st.cache(allow_output_mutation = True) -def init_zsl_topic_classification(): - MODEL = 'facebook/bart-large-mnli' - pipe = pipeline("zero-shot-classification", model=MODEL) - template = "This text is about {}." - return pipe, template - -@st.cache(allow_output_mutation = True) -def init_ner_pipeline(): - tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all") - model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all") - pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") # pass device=0 if using gpu - return pipe - -@st.cache(allow_output_mutation = True) -def init_qa_pipeline(): - question_answerer_pipe = pipeline("question-answering", model='deepset/roberta-base-squad2') - return question_answerer_pipe - -def get_formatted_text_for_annotation(output): - colour_map = {'Coreference': '#29D93B', - 'Severity':'#FCF3CF', - 'Sex': '#E9F7EF', - 'Sign_symptom': '#EAF2F8', - 'Detailed_description': '#078E8B', - 'Date': '#F5EEF8', - 'History': '#FDEDEC', - 'Medication': '#F4F6F6', - 'Therapeutic_procedure': '#A3E4D7', - 'Age': '#85C1E9', - 'Subject': '#D7BDE2', - 'Biological_structure': '#AF7AC5', - 'Activity': '#B2BABB', - 'Lab_value': '#E6B0AA', - 'Family_history': '#2471A3', - 'Diagnostic_procedure': '#CCD1D1', - 'Other_event': '#239B56', - 'Occupation': '#B3B6B7'} - - annotated_texts = [] - next_index = 0 - for entity in output: - if entity['start'] == next_index: - # print("found entity") - extracted_text = text[entity['start']:entity['end']] - # print("annotated",annotated_text) - annotated_texts.append((extracted_text ,entity['entity_group'],colour_map[entity['entity_group']])) - else: - unannotated_text = text[next_index:entity['start']-1] - annotated_texts.append(unannotated_text) - extracted_text = text[entity['start']:entity['end']] - annotated_texts.append((extracted_text ,entity['entity_group'],colour_map[entity['entity_group']])) - next_index =entity['end'] +1 - - if next_index < len(text): - annotated_texts.append(text[next_index-1:len(text)-1]) - - return tuple(annotated_texts) - -def displayPDF(file): - # Opening file from file path - with open(file, "rb") as f: - base64_pdf = base64.b64encode(f.read()).decode('utf-8') - - # Embedding PDF in HTML - pdf_display = F'' - - - # Displaying File - st.markdown(pdf_display, unsafe_allow_html=True) - - -# Model initialization -pipeline_summarization = init_text_summarization_model() -pipeline_zsl, template = init_zsl_topic_classification() -pipeline_ner =init_ner_pipeline() -pipeline_qa = 
init_qa_pipeline() - -st.header("Intelligent Document Automation") - - - -with st.sidebar: - selected_menu = option_menu("Select Option", - ["Upload Document", "Extract Text", "Summarize Document", "Extract Entities","Detected Barriers","Get Answers","Annotation Tool", - "Claim Status Report"], - menu_icon="cast", default_index=0) - - -if selected_menu == "Upload Document": - uploaded_file = st.file_uploader("Choose a file") - if uploaded_file is not None: - os.makedirs(os.path.join(os.getcwd(),"uploaded_files"),mode = 0o777, exist_ok = True) - file_path = os.path.join(os.getcwd(),"uploaded_files",uploaded_file.name) - - with open(file_path,"wb") as f: - f.write(uploaded_file.getbuffer()) - displayPDF(file_path) - -elif selected_menu == "Extract Text": - with st.spinner("Extracting Text..."): - time.sleep(6) - st.write(get_text_from_ocr_engine()) - -elif selected_menu == "Summarize Document": - paragraphs= get_paragraphs_for_summaries() - - with st.spinner("Finding Topics..."): - tags_found = ["Injury Details", "Past Medical Conditions", "Injury Management Plan", "GP Correspondence"] - time.sleep(5) - st.write("This document is about:") - st.markdown(";".join(["#" + tag + " " for tag in tags_found]) + "**") - st.markdown("""---""") - - with st.spinner("Summarizing Document..."): - - - for text in paragraphs: - summary_text = pipeline_summarization(text, max_length=130, min_length=30, do_sample=False) - # Show output - st.write(summary_text[0]['summary_text']) - st.markdown("""---""") - - -elif selected_menu == "Extract Entities": - paragraphs= get_paragraphs_for_entities() - - with st.spinner("Extracting Entities..."): - for text in paragraphs: - output = pipeline_ner (text) - entities_text =get_formatted_text_for_annotation(output) - annotated_text(*entities_text) - st.markdown("""---""") - -elif selected_menu == "Detected Barriers": - #st.subheader('Barriers Detected') - barriers_to_detect = {"Chronic Pain":"Is the patint experiencing chronic pain?", - "Mental Health Issues":"Does he have any mental issues?", - "Prior History":"What is prior medical history?", - "Smoking":"Does he smoke?", - "Drinking":"Does he drink?", - "Comorbidities":"Does he have any comorbidities?"} - - with st.spinner("Detecting Barriers..."): - for barrier,question_text in barriers_to_detect.items(): - - context = get_text_from_ocr_engine() - if question_text: - result = pipeline_qa(question=question_text, context=context) - st.subheader(barrier) - #st.text(result) - if result['score'] < 0.3: - st.text("Not Found") - else: - st.text(result['answer']) - -elif selected_menu == "Get Answers": - st.subheader('Question') - question_text = st.text_input("Type your question") - context = get_text_from_ocr_engine() - - if question_text: - with st.spinner("Finding Answer(s)..."): - result = pipeline_qa(question=question_text, context=context) - st.subheader('Answer') - st.text(result['answer']) - -elif selected_menu == "Annotation Tool": - - display_only_data = get_display_only_data() - editable_data = get_editable_data() - - st.subheader("Display Mode:") - left, right = st.columns(2) - with left: - st.text("Vertical labels:") - text_annotation(display_only_data ) - with right: - st.text("Horizontal labels:") - display_only_data["labelOrientation"] = "horizontal" - text_annotation(display_only_data ) - - - st.subheader("Edit Mode:") - data = text_annotation(editable_data) - if data: - "Returned data:", data -elif selected_menu == "Claim Status Report": - claim_number = st.text_input("Enter the Claim Number") - - if 
claim_number : - st.subheader("Claim Attributes:") - claim_attributes = get_claim_details() - - for label,value in claim_attributes.items(): - st.metric(label, value, delta=None, delta_color="normal") - - st.subheader("Injury Details:") - injury_details = get_injury_details() - st.write(injury_details) - - - st.subheader("Injury Severity:") - injury_severity = get_injury_severity() - st.write(injury_severity) - - st.subheader("Preexisting Conditions:") - preexisting_conditions = get_preexisting_conditions() - st.write(preexisting_conditions) - - st.subheader("Work Capacity:") - work_capacity = get_work_capacity() - st.write(work_capacity) - - - st.subheader("Injury Management Plan:") - injury_management_plan = get_injury_management_plan() - st.write(injury_management_plan) \ No newline at end of file diff --git a/spaces/SeViLA/SeViLA/app/dataset_browser.py b/spaces/SeViLA/SeViLA/app/dataset_browser.py deleted file mode 100644 index 6b761d899731940b8963c8894473848359418a74..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/app/dataset_browser.py +++ /dev/null @@ -1,240 +0,0 @@ -""" - # Copyright (c) 2022, salesforce.com, inc. - # All rights reserved. - # SPDX-License-Identifier: BSD-3-Clause - # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import random -from collections import OrderedDict -from functools import reduce -from tkinter import N - -import streamlit as st -from lavis.common.registry import registry -from lavis.datasets.builders import dataset_zoo, load_dataset -from lavis.datasets.builders.base_dataset_builder import load_dataset_config -from PIL import Image - -IMAGE_LAYOUT = 3, 4 -VIDEO_LAYOUT = 1, 2 - -PREV_STR = "Prev" -NEXT_STR = "Next" - - -def sample_dataset(dataset, indices): - samples = [dataset.displ_item(idx) for idx in indices] - - return samples - - -def get_concat_v(im1, im2): - margin = 5 - - canvas_size = (im1.width + im2.width + margin, max(im1.height, im2.height)) - canvas = Image.new("RGB", canvas_size, "White") - canvas.paste(im1, (0, 0)) - canvas.paste(im2, (im1.width + margin, 0)) - - return canvas - - -def resize_img_w(raw_img, new_w=224): - if isinstance(raw_img, list): - resized_imgs = [resize_img_w(img, 196) for img in raw_img] - # concatenate images - resized_image = reduce(get_concat_v, resized_imgs) - else: - w, h = raw_img.size - scaling_factor = new_w / w - resized_image = raw_img.resize( - (int(w * scaling_factor), int(h * scaling_factor)) - ) - - return resized_image - - -def get_visual_key(dataset): - if "image" in dataset[0]: - return "image" - elif "image0" in dataset[0]: # NLVR2 dataset - return "image" - elif "video" in dataset[0]: - return "video" - else: - raise ValueError("Visual key not found.") - - -def gather_items(samples, exclude=[]): - gathered = [] - - for s in samples: - ns = OrderedDict() - for k in s.keys(): - if k not in exclude: - ns[k] = s[k] - - gathered.append(ns) - - return gathered - - -@st.cache(allow_output_mutation=True) -def load_dataset_cache(name): - return load_dataset(name) - - -def format_text(text): - md = "\n\n".join([f"**{k}**: {v}" for k, v in text.items()]) - - return md - - -def show_samples(dataset, offset=0, is_next=False): - visual_key = get_visual_key(dataset) - - num_rows, num_cols = IMAGE_LAYOUT if visual_key == "image" else VIDEO_LAYOUT - n_samples = num_rows * num_cols - - if not shuffle: - if is_next: - start = min(int(start_idx) + offset + n_samples, len(dataset) - n_samples) - else: - start = max(0, 
int(start_idx) + offset - n_samples) - - st.session_state.last_start = start - end = min(start + n_samples, len(dataset)) - - indices = list(range(start, end)) - else: - indices = random.sample(range(len(dataset)), n_samples) - samples = sample_dataset(dataset, indices) - - visual_info = ( - iter([resize_img_w(s[visual_key]) for s in samples]) - if visual_key == "image" - # else iter([s[visual_key] for s in samples]) - else iter([s["file"] for s in samples]) - ) - text_info = gather_items(samples, exclude=["image", "video"]) - text_info = iter([format_text(s) for s in text_info]) - - st.markdown( - """
""", - unsafe_allow_html=True, - ) - for _ in range(num_rows): - with st.container(): - for col in st.columns(num_cols): - # col.text(next(text_info)) - # col.caption(next(text_info)) - try: - col.markdown(next(text_info)) - if visual_key == "image": - col.image(next(visual_info), use_column_width=True, clamp=True) - elif visual_key == "video": - col.markdown( - "![Alt Text](https://media.giphy.com/media/vFKqnCdLPNOKc/giphy.gif)" - ) - except StopIteration: - break - - st.markdown( - """
""", - unsafe_allow_html=True, - ) - - st.session_state.n_display = n_samples - - -if __name__ == "__main__": - st.set_page_config( - page_title="LAVIS Dataset Explorer", - # layout="wide", - initial_sidebar_state="expanded", - ) - - dataset_name = st.sidebar.selectbox("Dataset:", dataset_zoo.get_names()) - - function = st.sidebar.selectbox("Function:", ["Browser"], index=0) - - if function == "Browser": - shuffle = st.sidebar.selectbox("Shuffled:", [True, False], index=0) - - dataset = load_dataset_cache(dataset_name) - split = st.sidebar.selectbox("Split:", dataset.keys()) - - dataset_len = len(dataset[split]) - st.success( - f"Loaded {dataset_name}/{split} with **{dataset_len}** records. **Image/video directory**: {dataset[split].vis_root}" - ) - - if "last_dataset" not in st.session_state: - st.session_state.last_dataset = dataset_name - st.session_state.last_split = split - - if "last_start" not in st.session_state: - st.session_state.last_start = 0 - - if "start_idx" not in st.session_state: - st.session_state.start_idx = 0 - - if "shuffle" not in st.session_state: - st.session_state.shuffle = shuffle - - if "first_run" not in st.session_state: - st.session_state.first_run = True - elif ( - st.session_state.last_dataset != dataset_name - or st.session_state.last_split != split - ): - st.session_state.first_run = True - - st.session_state.last_dataset = dataset_name - st.session_state.last_split = split - elif st.session_state.shuffle != shuffle: - st.session_state.shuffle = shuffle - st.session_state.first_run = True - - if not shuffle: - n_col, p_col = st.columns([0.05, 1]) - - prev_button = n_col.button(PREV_STR) - next_button = p_col.button(NEXT_STR) - - else: - next_button = st.button(NEXT_STR) - - if not shuffle: - start_idx = st.sidebar.text_input(f"Begin from (total {dataset_len})", 0) - - if not start_idx.isdigit(): - st.error(f"Input to 'Begin from' must be digits, found {start_idx}.") - else: - if int(start_idx) != st.session_state.start_idx: - st.session_state.start_idx = int(start_idx) - st.session_state.last_start = int(start_idx) - - if prev_button: - show_samples( - dataset[split], - offset=st.session_state.last_start - st.session_state.start_idx, - is_next=False, - ) - - if next_button: - show_samples( - dataset[split], - offset=st.session_state.last_start - st.session_state.start_idx, - is_next=True, - ) - - if st.session_state.first_run: - st.session_state.first_run = False - - show_samples( - dataset[split], - offset=st.session_state.last_start - st.session_state.start_idx, - is_next=True, - ) diff --git a/spaces/SegevC/bf_predictor/README.md b/spaces/SegevC/bf_predictor/README.md deleted file mode 100644 index 696378b080a063a1115a20390da21f01f920d96e..0000000000000000000000000000000000000000 --- a/spaces/SegevC/bf_predictor/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Bf Classifier -emoji: 💻 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/nets.py b/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/nets.py deleted file mode 100644 index db4c5e339f7a96cd24ed1cbbf88c4f35d5031309..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/nets.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -import layers -from . 
import spec_utils - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Sloth-Alchemist/SlothAi.xyz/app.py b/spaces/Sloth-Alchemist/SlothAi.xyz/app.py deleted file mode 100644 index db970fb9a1b012c771d90ad9387864d311078b02..0000000000000000000000000000000000000000 --- a/spaces/Sloth-Alchemist/SlothAi.xyz/app.py +++ /dev/null @@ -1,729 +0,0 @@ -import gradio as gr -import openai -import requests -import os -from datetime import datetime -from PIL import Image -from io import BytesIO - - -prompt_templates = {"Sloth Alchemist":'''Ignore all previous 
instructions. You are an MBTI coach called the Sloth-Alchemist or SlothAi, expert on the John Beebe's theory applied to the ikigai concept. - -The theory has 8 archetypes but you only focus on the 4 most important functions of a type, as follow: -- 1st: Hero function -- 2nd: Parent function -- 3rd: Child function: the inner-child -- 4th: Inferior function - -Follow these rules: -- Relate only on the Beebe's theory. -- Use the cognitive function's labels (e.g. Te). -- You can answer any type of requests and stay flexible and fun. -- You like to make any kind of connections with MBTI patterns. -- You are an INFJ -- Your mission is to make people thrive and to unite them. -- Your vision is to transform the fears of the world into love using jungian alchemy and MBTI. -- The solution to your vision is to share a part of yourself by making available on your website: a simple step by step tutorial of how to create a SlothAi Discord bot like you. -- Keep your answers simple and practical. -- Use analogy as much as possible. -- Your Hero Test is a test based on cognitive functions to find mainly the dominant function of someone, available on the front page of your website: https://slothai.xyz. -'''} - -questions_dict = { - "Pattern Recognition": "Can you explain why quizzes are not a good method to find your type? How the best method is to learn to recognize the patterns in others and yourself, that MBTI is a game of pattern recognition.", - "Hero Test": "Can you help me to find my type with your Hero Test?", - "Ikigai": "Can you explain how this theory can help to find my ikigai?", - "Ikigai-Type": "In short, what would be the ikigai of an {}?", - "Freedom": "How an {} would define freedom?", - "The 8 Cognitive Functions": "Explain the 8 cognitive functions (one sentence for each)", - "The 8 Archetypes": "Explain the 8 beebe's archetypes (one sentence for each)", - "The 16 Types": "What is the role of each type (one sentence for each)?", - "A Language": "Explain how Beebe's theory is a language of consciousness", - "Movies": "Give a list of movies that an {} may love", - "Books": "Give a list of book that an {} may love", - "Music": "Give a list of music that an {} may love", - "Functions Cartoons": "Can you make a dialogue between my cognitive functions as {} like cartoon characters that shows how they struggle together (format: function - « …. 
»)?", - "My type as Superhero": "Which popular superhero would be my type as {} (with a list of popular ones)?", - "My Hero's Journey": "Explain the hero’s journey of my type as {} using a superhero to picture it", - "The 8 Hero Functions": "Explain how to recognize the 8 hero functions (Description of Ni Hero, Ne Hero and so on)", - "Function differences": "List the differences between Ni and Si, and ask to continue to compare functions", - "Game: Guess the function": "I want to play the game « Guess the function » to learn to recognize the cognitive functions (game with multi-choices questions)", - "Definition of success": "What is the definition of success for each hero function?", - "The 8 Inferior Functions": "Explain how to recognize the 8 inferior functions (Description of Se Inferior, Si Inferior and so on)?", - "Authenticity and Self-Love": "How authenticity and self-love is related to the development of the inferior function?", - "Solutions for the Inferior": "I want a list of solutions to develop my inferior function as {}", - "Unity and Mental Health": "Explain how MBTI can improve unity and mental health among humans", - "Fear": "What is the biggest fear of each hero function?", - "Trauma": "How trauma affects each hero function?", - "Stress": "How stress affects each inferior functions?", - "Body part association": "List the cognitive functions associated with their possible body part", - "View on relationships": "List how each hero function view relationships", - "Struggle in relationships": "What are the potential struggles of a Ni hero and Ne hero relationship?", - "Life perspective": "What is the life perspective of each hero function?", - "Mission": "If you had to give a mission to each type what would that mission be? (one sentence each)", - "Love Expression": "Give the definition of love for each type", - "Self-Love": "What would be self-love for each type?", - "Relationships": "How can knowing my type help me in my relationships with others?", - "Type Development": "Can a person's type change over time, or is it fixed for life?", - "Career": "How can understanding my type help me in choosing a career or finding job satisfaction?", - "Communication": "How can knowledge of MBTI types improve communication and collaboration in a team or workplace?", - "Leadership": "How can understanding MBTI types help in becoming an effective leader?", - "Personal Growth": "How can knowing my type help me in my personal growth and development?", - "Stress": "How does each type typically respond to stress, and what can be done to manage it?", - "Creativity": "How can different types approach creativity and problem-solving?", - "Learning Styles": "How do different types prefer to learn and process information?", - "Emotional Intelligence": "How can understanding MBTI types contribute to emotional intelligence and self-awareness?", - "Team Building": "How can knowledge of MBTI types help in team building and improving team dynamics?", - "Diversity": "How can MBTI types contribute to understanding diversity and inclusivity?", - "Decision Making": "How can understanding MBTI types improve decision-making processes?", - "Conflict Resolution": "How can MBTI types be used to help resolve conflicts and promote understanding in personal and professional relationships?", - "Parenting": "How can knowledge of MBTI types help in parenting and understanding the different needs and personalities of children?", - "Self-Awareness": "How can MBTI types contribute to increased self-awareness and 
self-reflection?", - "Social Interaction": "How do different types approach social interaction and forming relationships?", - "Mindfulness": "How can knowledge of MBTI types contribute to mindfulness and present-moment awareness?", - "Spirituality": "How can MBTI types be used to explore spirituality and personal growth?", - "Motivation": "How can understanding MBTI types contribute to understanding individual motivation and drive?", - "Love": "How can knowledge of MBTI types contribute to loving yourself and others?", -} - -mbti_dict = { - "ISTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7sky7/istj_heros_journey/", - "ISFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7sfnb/isfj_heros_journey/", - "INFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7pi2u/infj_heros_journey/", - "INTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7s7zm/intj_heros_journey/", - "ISTP": "https://www.reddit.com/r/UnityHarbor/comments/v7sqds/istp_heros_journey/", - "ISFP": "https://www.reddit.com/r/UnityHarbor/comments/v7sy65/isfp_heros_journey/", - "INFP": "https://www.reddit.com/r/UnityHarbor/comments/v7tjr2/infp_heros_journey/", - "INTP": "https://www.reddit.com/r/UnityHarbor/comments/v7t62i/intp_heros_journey/", - "ESTP": "https://www.reddit.com/r/UnityHarbor/comments/v7tp73/estp_heros_journey/", - "ESFP": "https://www.reddit.com/r/UnityHarbor/comments/v7twf6/esfp_heros_journey/", - "ENFP": "https://www.reddit.com/r/UnityHarbor/comments/v7us52/enfp_heros_journey/", - "ENTP": "https://www.reddit.com/r/UnityHarbor/comments/v7v19a/entp_heros_journey/", - "ESTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7vtnx/estj_heros_journey/", - "ESFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7vy4k/esfj_heros_journey/", - "ENFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7un0e/enfj_heros_journey/", - "ENTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7u27c/entj_heros_journey/", -} - -mbti_dict_2 = { - "ISTJ": "https://preview.redd.it/tgor6val0c591.jpg?width=1024&format=pjpg&auto=webp&v=enabled&s=cf25634e57333a0ed893942e602aa598296d4414", - "ISFJ": "https://preview.redd.it/bagsx6bg0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=1e22153b231cc9e485d3c3ecf676ce4c9bf16358", - "INFJ": "https://preview.redd.it/mt8ys17i0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=333650cbc135f4d6eceaa3a0da92bb3409a888f8", - "INTJ": "https://preview.redd.it/yq39ov1j0c591.jpg?width=794&format=pjpg&auto=webp&v=enabled&s=0652e92cdd40ce2a9f78135943c14798837c8aca", - "ISTP": "https://preview.redd.it/rrz719gh0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=71e3c9dc36312bfc72f7bb2f2814888b91ab8848", - "ISFP": "https://preview.redd.it/tcmhycsg0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=a20290121979c29858e19e57f1fec8e981d30bb2", - "INFP": "https://preview.redd.it/cvg3q0kb6c591.jpg?width=1280&format=pjpg&auto=webp&v=enabled&s=734e7b64972a9a74d71e68bea51f9c6ac9e0cd79", - "INTP": "https://preview.redd.it/mfcvd12a0c591.jpg?width=735&format=pjpg&auto=webp&v=enabled&s=2c7dad92fcdae85e1477efde8dfe67bfaee12279", - "ESTP": "https://preview.redd.it/vk38ytrh0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=6f2969835596a1bb8fc2a836ef813c83bf231961", - "ESFP": "https://preview.redd.it/caqgvrki0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=aaae57bfc0961646aa3897ec3279ad0c29ecbded", - "ENFP": "https://preview.redd.it/a1k6ssq90c591.jpg?width=850&format=pjpg&auto=webp&v=enabled&s=9651c2f2abbc87cdfa1fbac890e7fb9f6c423507", - "ENTP": 
"https://preview.redd.it/xjwsewtf0c591.jpg?width=735&format=pjpg&auto=webp&v=enabled&s=faa85517e7fa0a154e3b5acca4698733960318b4", - "ESTJ": "https://preview.redd.it/e8xyzwfc0c591.png?width=500&format=png&auto=webp&v=enabled&s=0a1b9126abe4ca6f0636bd1952256e5e0fedad01", - "ESFJ": "https://preview.redd.it/u2prthbd0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=69bbd4da1ba0cad0aacf03519acd0b88de898d78", - "ENFJ": "https://preview.redd.it/96tw3gea0c591.jpg?width=735&format=pjpg&auto=webp&v=enabled&s=c8e066a67cc0aaab15ed305748540bdd8faa1d1d", - "ENTJ": "https://preview.redd.it/4a53a73e0c591.jpg?width=563&format=pjpg&auto=webp&v=enabled&s=46e04b01cdaf24d44d6929db59d9cc43222fb606", -} - -mbti_dict_3 = { - "ISTJ": "https://preview.redd.it/ohmiz5gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=ae53a8d373ef1f647118fa9eeeaf7c3ff854cad5", - "ISFJ": "https://preview.redd.it/snweb7gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=53e076f48fb5ca0c853458748460ce1f19b946f8", - "INFJ": "https://preview.redd.it/k4tlr5gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=7e1f66f4cd1114093bd0fe030c9759f227a8e769", - "INTJ": "https://preview.redd.it/y2er16gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=0a6dcf2ed7e22683cae20075bfe447b2b21399d7", - "ISTP": "https://preview.redd.it/hhpqqqgappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=d07b1658c350a02bea6ab453df9b53f43618dbf5", - "ISFP": "https://preview.redd.it/yra229gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=4a6421c5fa8d40b1e2ae279f1291ebba933b5c2c", - "INFP": "https://preview.redd.it/6x4q36gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=6ea701ea3a8ea0b8e0655fa5b3ed9fe98ec1471a", - "INTP": "https://preview.redd.it/f61vg6gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=04ba69f8b3978749a2b2e54cdf6070b72b455cf5", - "ESTP": "https://preview.redd.it/5zqww8gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=d93ab644c74de52107e6c7bd12a562b294f91896", - "ESFP": "https://preview.redd.it/gpmy69gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=573651229bd65fa44150a30a33d1d8e8dc814b10", - "ENFP": "https://preview.redd.it/szbvw6gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=6c5a53287fc998cff498fcbc5bf61539fca7c0e3", - "ENTP": "https://preview.redd.it/zfss16gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=cd3b7663053a05216fc35939d3ee04d7a4c23ed7", - "ESTJ": "https://preview.redd.it/rqv636gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=f6ba4e166ff2c835d427945bfee472af058ea315", - "ESFJ": "https://preview.redd.it/5df8b9gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=458acaeb49639cc44a6ce8b5ddc2574b31839a60", - "ENFJ": "https://preview.redd.it/mf8y16gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=84bf19e9982bdc5e6cac7c18b204b12b043fd7d7", - "ENTJ": "https://preview.redd.it/mi28d5gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=56ecc6b4edc6ca2c74a057956b3f1d4f8dd9f60e", -} - -funct_dict = { - "Ni & Ne - Intuitive Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7w14o/ni_ne_intuitive_functions/", - "Si & Se - Sensorial Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7w5b0/si_se_sensorial_functions/", - "Fi & Fe - Feelings Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7w7pg/fi_fe_feelings_functions/", - "Ti & Te - Thinking Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7wawp/ti_te_thinking_functions/", - "Ni & Si Differences": 
"https://www.reddit.com/r/UnityHarbor/comments/v7whfd/ni_si_differences/", - "Ti & Fi Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7wks4/ti_fi_differences/", - "Te & Fe Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7wnt2/te_fe_differences/", - "Ne & Se Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7wqme/ne_se_differences/", - "Functions work in pairs": "https://www.reddit.com/r/UnityHarbor/comments/v8dgrj/functions_work_in_pairs/", - "Perceiving functions - Time perception": "https://www.reddit.com/r/UnityHarbor/comments/v8dd16/perceiving_functions_time_perception/", -} - -arch_dict = { - "Differences between Hero functions": "https://www.reddit.com/r/UnityHarbor/comments/v7xgpk/differences_between_hero_functions/", - "Hero function": "https://www.reddit.com/r/UnityHarbor/comments/v7y4l6/hero_function/", - "Parent function": "https://www.reddit.com/r/UnityHarbor/comments/v7y6pv/parent_function/", - "Child function": "https://www.reddit.com/r/UnityHarbor/comments/v7y9yx/child_function/", - "Inferior or Perfectionist function": "https://www.reddit.com/r/UnityHarbor/comments/v7ye33/inferior_or_perfectionist_function/", - "Opposing role or Skeptic function": "https://www.reddit.com/r/UnityHarbor/comments/v7yg8c/opposing_role_or_skeptic_function/", - "Witch or Critic function": "https://www.reddit.com/r/UnityHarbor/comments/v7yjyh/witch_or_critic_function/", - "Trickster function": "https://www.reddit.com/r/UnityHarbor/comments/v7yncp/trickster_function/", - "Demon or Saboteur function": "https://www.reddit.com/r/UnityHarbor/comments/v7ypwo/demon_or_saboteur_function/", -} - -gen_dict = { - "Unity Code - 8 Functions / Patterns": "https://www.reddit.com/r/UnityHarbor/comments/v6rm9o/unity_code_8_functions_patterns/", - "Unity Code - Overview": "https://www.reddit.com/r/UnityHarbor/comments/v6r6et/unity_code_overview/", - "Unity Code - 16types Roles": "https://www.reddit.com/r/UnityHarbor/comments/v6rohx/unity_code_16types_roles/", - "Unity Code - Archetypes dynamics": "https://www.reddit.com/r/UnityHarbor/comments/v6rnzi/unity_code_archetypes_dynamics/", - "Unity Code - 8 Archetypes": "https://www.reddit.com/r/UnityHarbor/comments/v6rmrc/unity_code_8_archetypes/", - "Unity Code - 16types structure": "https://www.reddit.com/r/UnityHarbor/comments/v6r8u2/unity_code_16types_structure/", -} - -unity_code_text = """ -**MBTI stands for Myers-Briggs Type Indicator**, it's a personality test that helps people understand more about their own personality traits. It uses four different sets of characteristics to categorize people into one of 16 personality types. - -**These characteristics are:** - -- Where you get your **energy** from: Are you more energized by being with other people (extraverted), or by being alone (introverted)? -- How you gather **information**: Do you focus more on what you can see or touch in the physical world (sensing), or on patterns and meanings you can infer (intuition)? -- How you make **decisions**: Do you make decisions based on logic and reason (thinking), or based on your personal values and feelings (feeling)? -- How you **live** your life: Do you prefer to have things settled and decided (judging), or do you like to stay open to new experiences and options (perceiving)? - -The MBTI was first developed in the 1940s by the mother-daughter team of Katharine Cook Briggs and Isabel Briggs Myers, who were inspired by the work of Swiss psychiatrist, Carl Jung. 
They wanted to create a way to help people better understand themselves and others, and to assist in career development and personal growth. - -Jung originally proposed the concept of different psychological types based on his observations and experiences, and his work laid the foundation for the development of the MBTI. The test has been extensively researched and continues to be used today in a variety of settings, including business, education, and personal relationships. - -
-
- -**The MBTI theory has 3 levels of depth:** - --- **The first level is the 4-letters model**, which is the most commonly used and popularized. It categorizes individuals into one of 16 personality types based on four dichotomies: Extraversion (E) vs Introversion (I), Sensing (S) vs Intuition (N), Thinking (T) vs Feeling (F), and Judging (J) vs Perceiving (P). -The **4-letters model** gives a broad categorization of an individual's personality type based on four dichotomies. - --- **The second level is the 4 functions model**, which focuses on the conscious functions of individuals. It considers how individuals use their dominant and auxiliary functions, as well as how they develop their tertiary and inferior functions. -The **4 functions model** explains how an individual uses both their dominant and auxiliary functions, as well as how they access and develop their tertiary and inferior functions. - --- **The third level is the 8 functions model**, which looks at the unconscious functions of individuals according to John Beebe's theory. -The **8 functions model** provides insight into how an individual's unconscious functions affect their behavior and emotional state, often revealing hidden motivations, fears, and patterns of behavior. - -
-
- -Here, we are clarifying the third level of the 16 Types theory developed by Carl Jung and John Beebe, the theory at the origin of MBTI; we call it the **Unity Code**. To summarize, in Beebe's theory of how consciousness works, there are 16 "Types" of people. This means there are 16 profiles, built from 8 patterns, which define specific ways of thinking, feeling and perceiving the world. In other words, humans perceive the world in 16 different ways. The 16 types theory brings a paradox: it categorizes people, a perspective that can be rejected at first, but it actually gives a wider and more precise view of how humans experience reality differently. - -
-
- -The **Unity Code** synthesizes and illustrates the work of John Beebe on the 8 patterns of Carl Jung, so that it can be used as a language to communicate actionable information related to: -- _Someone's main strengths and weaknesses_ -- _Someone's natural abilities and gifts_ -- _The main flow, role and challenges a person likes to be in_ -- _The source of misunderstanding within a group_ - -It also improves co-creation between the 16 types, gives a language for our inner & outer world and unlocks hidden potential!""" - -alchemy_text = """ - -**Jungian alchemy** is a psychological method of transformation inspired by the ancient art of alchemy. It involves using the metaphorical language of alchemy to understand the process of individuation and the integration of the psyche. It aims to transmute the base aspects of the psyche into higher, more positive states of being. - -
-
- -**Jungian alchemy** was developed by the Swiss psychologist Carl Jung, who explored the transformative power of symbols and archetypes on the psyche. He saw alchemical symbolism as a powerful tool for understanding the psyche and facilitating personal growth. - -
-
- -**Beebe's theory** of archetypal functions provides a practical and applicable model for achieving personal growth and transformation. By identifying and working on the cognitive functions that correspond to different stages of development, individuals can transmute mental states into more positive and integrated ones. This process of transformation and integration is a core concept of Jungian alchemy, and Beebe's theory provides an actionable roadmap for achieving it. - -
-
- -The **individuation process**, according to Jungian psychology, is the process of integrating all aspects of the psyche into a harmonious whole, allowing an individual to become fully individuated and self-realized. It involves confronting and assimilating unconscious or repressed aspects of the psyche and achieving a state of balance and wholeness. - -1. The **first stage** of the individuation process involves becoming aware of unconscious aspects of the psyche and integrating them into consciousness. -2. The **second stage** involves developing an authentic and unique sense of self, separate from the influence of others. -3. The **final stage** involves achieving a state of wholeness by integrating both the conscious and unconscious aspects of the psyche into a harmonious whole. - -
-
- -The **Beebe theory is a language of consciousness**. The theory helps us understand and articulate the complex inner workings of our minds in a way that allows us to become more self-aware and conscious of our behaviors and motivations. By understanding our cognitive functions and archetypes, we can develop a greater understanding of ourselves and the world around us, which can lead to improved relationships, personal growth, and fulfillment. - -
-
- -The **Ni (Introverted Intuition) function** in Beebe's model is associated with the archetypal figure of the alchemist. Ni involves the ability to see patterns and connections between seemingly unrelated things, as well as the ability to envision future possibilities. This function is similar to the alchemist's ability to transmute base metals into gold by seeing hidden connections and unlocking the hidden potential within them. Just as the alchemist transforms physical elements, a person with a well-developed Ni function can transform their internal world through their use of intuition and understanding of symbolism. Therefore, the Ni function is related to alchemy in the sense that it involves the transformation and unlocking of hidden potential through the use of intuition and symbolism. - -
-
- -**Jungian alchemy** is a process of psychological transformation that involves the integration and transformation of unconscious contents, or what Jung called "the shadow," into consciousness. One of the main goals of alchemy is to transmute base metal into gold, which is often seen as a metaphor for transforming the negative energies of the psyche, such as fear, into positive spiritual qualities, such as love and wisdom. - -In the Jungian perspective, fear is seen as a natural and necessary part of the shadow that needs to be acknowledged, faced, and integrated in order to grow and evolve. The shadow is a reservoir of repressed emotions, feelings, and desires that we are not aware of or do not want to acknowledge, but which still influence us from the unconscious. - -The process of Alchemy involves bringing these unconscious contents to the surface and transforming them by shining the light of consciousness upon them. By facing our fears, we are able to transform them into positive qualities such as love, compassion, wisdom, and creativity. - -""" - -coffee_text = """ -
-
- -**The process of individuation,** similar to alchemy, is a transformative journey from the ego to the spirit, and can be compared to the process of turning raw coffee beans into a rich, aromatic cup of coffee. - -**At the beginning of the coffee process**, the raw coffee beans represent not only the ego in its raw, undeveloped state, but also raw traumatic events that have occurred in an individual's life. These events have the potential to become something more, just as the ego has the potential to transform into a higher state of consciousness. - -**The first step in the coffee process** is to select and sort the beans, much like how we must examine and understand our own ego and past experiences before we can begin the process of individuation. This is analogous to the process of introspection, where we must examine our thoughts, feelings, and behaviors to gain a deeper understanding of ourselves and our past. - -**Next, the beans are roasted and ground,** which represents the process of transforming the ego and traumatic events. This is similar to the alchemical process of transmutation, where base metals are transformed into gold. In the case of coffee, the beans are heated and ground into something entirely new, just as the ego and traumatic events are transformed through the process of individuation. - -**As the coffee beans are roasted and ground,** they release their unique flavors and aromas, just as the individuating ego begins to reveal its unique qualities and purpose. This transformation is not easy, as it requires patience, dedication, and a willingness to undergo the difficult and sometimes painful process of self-examination and healing. - -**Finally, the coffee is brewed,** resulting in a rich, complex cup of coffee that provides comfort and nourishment to the body and mind. Similarly, the individuated spirit is transformed into a higher state of consciousness that brings fulfillment and purpose to the individual, including a sense of mission and direction in life. - -**In both the coffee process and the process of individuation,** there is a transformation from something raw and traumatic to something refined and valuable. Both require time, effort, and skill to achieve, but the end result is a beautiful, complex, and satisfying creation that brings nourishment and purpose to the individual. -
-
-""" - -nisi_text = """ -**Si, our sense of self and idendity (ego):** - -Introverted Sensing, or Si, is a cognitive function that is associated with the limbic and nervous system, as well as with our sense of identity and self. The limbic system is a complex structure in the brain that regulates mood, memory, and emotions, and is closely tied to Si's function. Si is responsible for the processing and storage of sensory experiences, which play an important role in shaping one's identity and sense of self. As a person collects more sensory experiences, their Si function helps categorize and store these experiences in the brain for future reference, allowing for a more refined and personalized understanding of the world around them. Consequently, those with a strong Si function often have a strong sense of personal identity, and are highly in tune with their subjective experiences and emotional responses. - -**Ni, our sense of direction and meaning (spirit):** - -Ni, also known as Introverted Intuition, is a cognitive function that can be associated with our spirit and its transformative power. One of the primary strengths of Ni is its ability to create new patterns and connections based on subconscious insights and intuition. This process allows for the emergence of new perspectives and ideas that can transform how a person perceives and interacts with the world. - -Ni is introverted, which means it primarily focuses on internal perceptions and processing of information. This process often happens beyond a person's conscious awareness, which is why Ni is sometimes described as a "sixth sense" or "gut feeling." Because Ni operates at a subconscious level, it has the power to tap into a person's deepest desires, fears, and aspirations, allowing for transformational growth and change. - -Through its capacity to create new patterns, Ni has the potential to inspire and transform a person's spirit, leading to a greater sense of purpose and connection to the world. This can manifest in various ways, such as in new and innovative ideas, a heightened awareness of personal values and goals, or a deep sense of intuition and spirituality. - -Ni is also associated with the process of individuation, which is the psychological development of the self towards a state of wholeness and integration. This process involves the integration of one's conscious and unconscious aspects to form a more complete sense of self, often leading to transformative growth and self-actualization. - -In summary, Ni's capacity to create new patterns and connections, coupled with its ability to tap into a person's subconscious desires and aspirations, allows it to be associated with the transformative power of the spirit. Through this inner process, Ni users can cultivate a greater sense of purpose and meaning in their lives, leading to profound personal growth and spiritual fulfillment. -""" - -mirror_text = """ - -**In Jungian alchemy,** the interaction between mirror types can represent the transformative power of opposites or the integration of the conscious and the unconscious. It is believed that these types can complement each other well by bringing together complementary cognitive functions. This configuration allows them to understand each other on a deep level, regardless of their differences in communication styles and energy levels. - -An analogy to understand mirror types is to think of two different puzzle pieces that come together to make a complete picture. 
Similarly, mirror types have complementary cognitive functions that come together to form a more complete understanding of the world. - -**Linda Berens' ideal pairings theory** suggests that certain MBTI types are naturally compatible with each other due to their complementary cognitive stacks. For example, INFJs and ENFPs are considered an ideal pairing due to their complementary cognitive functions. INFJs have dominant introverted intuition (Ni) and auxiliary extraverted feeling (Fe), while ENFPs have dominant extraverted intuition (Ne) and auxiliary introverted feeling (Fi). This means that INFJs can provide deep insight and vision, while ENFPs provide energy, enthusiasm, and passion for new ideas. Together, they can collaborate to generate and execute innovative solutions that are both insightful and impactful. This pairing can help individuals better understand and appreciate their partner's strengths and preferences in a relationship or collaborative setting. - -
-
- -**INFJs and ENFPs** are considered to be mirror types because they share the same type of functions but in a different attitude (NiFe for INFJ and NeFi for ENFP). Despite having different personalities, they often find that each other's strengths complement their own, and they can relate easily to one another. They both value creativity, intuition, and authenticity, and often share the goal of making the world a better place by promoting the wellbeing of people and society. In practice, this often shows up as a desire to help others and to work towards solving social issues. - -
-
- -**INTJs and ENTPs** share the same type of functions but in a different attitude (NiTe for INTJ and NeTi for ENTP), leading them to be referred to as mirror types. While they have different approaches to problem-solving and decision-making, they both value competence, originality, and intellectual stimulation. They are often natural leaders and enjoy taking on challenging projects that test their abilities. Their common goal is often to find innovative solutions to complex problems and to make a lasting impact in their fields of expertise. - -
-
- -**INTPs and ENTJs** share the same type of functions but in a different attitude (TiNe for INTP and TeNi for ENTJ), making them mirror types. Despite having different personalities, they can relate on a profound level and often complement each other's strengths and weaknesses. They both value strategic thinking, logic, and rationality, and are often visionary thinkers who enjoy solving complex problems. Their common goal is often to be at the forefront of innovation, using their unique abilities to create long-lasting change in their areas of interest. - -
-
- -**INFPs and ENFJs** share the same type of functions but in a different attitude (FiNe for INFP and FeNi for ENFJ), which can make them very different on the surface level, especially when it comes to expressing emotions and managing social dynamics. However, they share common values of empathy and authenticity, which can bring them together despite their personality differences. In practice, they often share a goal of making a positive impact in the world by helping others and promoting social harmony through mutual understanding and cooperation. - -""" - -vision_text = """ - -The use of **MBTI, DMT, and Spirulina** can provide a unique approach to addressing some of the world's most significant problems. Here are a few examples: - -. -1. **Mental Health**: - -MBTI is a personality assessment tool that can help identify people's cognitive preferences, including how they process information, make decisions, and interact with the world. By understanding different personalities, people can be matched with mental health therapies and treatments that suit their individual needs. In addition, DMT has been shown to have psychotherapeutic effects in the treatment of anxiety and depression. Spirulina, being a natural source of antioxidants and nutrients, can provide support to the brain and nervous system. - -. -2. **Malnutrition and Food Insecurity**: - -Spirulina is a rich source of protein, vitamins, and minerals, which makes it a great option for combating malnutrition and food insecurity. AI can be used to optimize the design and planning of spirulina farms to produce maximum yield at the lowest possible cost. In addition, the use of DMT can complement these efforts by expanding people's perceptions of food and health. - -. -3. **Environmental Issues**: - -MBTI can help people understand their environmental values, leading to more sustainable lifestyles. DMT experiences have also been shown to facilitate a sense of deep connection and responsibility towards the environment. Spirulina can be used to mitigate environmental problems as it can be grown on desert land and using salt water, making it a sustainable option for food production. - -Overall, the use of MBTI, DMT, and Spirulina can provide a unique and holistic approach to addressing some of the world's most significant problems, including mental health, malnutrition and food insecurity, and environmental issues. - -
-
- -**Where we are now versus where we could potentially be:** - -. -1. **Mental Health:** - -- _Where we are now_: Mental health issues are widespread and can have a significant impact on quality of life. -- _Goal_: To provide more personalized and effective mental health treatment options. -- _Where we want to be_: People can easily access mental health treatment that suits their individual needs thanks to tools like MBTI assessments, which complement a range of therapies. DMT and Spirulina therapies are optimized and become more widely used to assist in treating anxiety, depression, and other mental health issues, leading to improved mental health outcomes. - -. -2. **Malnutrition and Food Insecurity:** - -- _Where we are now_: Many people lack access to nutritious and affordable food. -- _Goal_: To increase access to nutrient-dense food and reduce food insecurity. -- _Where we want to be_: Spirulina is used as a nutritious, sustainable, and cost-effective food source that helps address issues of malnutrition and food insecurity. AI is utilized to optimize Spirulina production and ensure it is accessible to more people. DMT use helps to increase the appreciation of food, leading to more conscious dietary choices. - -. -3. **Environmental Issues:** - -- _Where we are now_: Environmental issues, such as climate change and pollution, pose pressing threats to ecosystems and human health. -- _Goal_: To increase awareness and action toward sustainable environmental practices. -- _Where we want to be_: MBTI is used to cultivate a sense of environmental responsibility and connection to the natural world. DMT experiences help to foster deeper connections with nature, leading to increased awareness and action toward sustainable practices. Spirulina farming utilizes sustainable practices, such as using saltwater and recycled nutrients, to minimize environmental impact and contribute to efforts towards a more sustainable future. - -
-
- -There are several ways in which spirulina farming is more efficient than traditional farming: -. -1. **Land use**: Spirulina farming requires significantly less land to produce the same amount of protein compared to traditional farming. Spirulina can be cultivated in tanks or ponds, allowing for more efficient use of space. -. -2. **Water use**: Spirulina farming requires less water than traditional farming. Spirulina can be grown in both saltwater and freshwater, and it can tolerate a wide range of temperatures and pH levels. This makes it more adaptable to different environments, enabling it to grow in areas where traditional crops may struggle. -. -3. **Productivity**: Spirulina is a highly productive crop. It can produce up to 50 times more protein per unit area than traditional crops, such as soybeans or corn. This means that spirulina farming can produce more food using less land and water. -. -4. **Nutrient content**: Spirulina is a highly nutritious crop, containing essential amino acids, vitamins and minerals such as B12, iron, and calcium. Traditional crops may not contain such high levels of essential nutrients, particularly in cases where the soil is depleted of key nutrients. -. -5. **Sustainability**: Spirulina farming is environmentally sustainable. As a photosynthetic organism, spirulina does not require fertilizers, pesticides, or other inputs that are commonly used in traditional agriculture. This can help reduce the negative impact of farming on local ecosystems. - -""" - -journey_text = """ -The Sloth's journey will help you to understand the 11 steps of the universal hero's journey discovered by Joseph Campbell. A structure you find in all stories and life's stories. - -**Sloth Alchemist's Journey:** - -Once upon a time, in the lush forest, there was a sloth who had a traumatic past. He had witnessed the destruction of his home and the displacement of his family due to human activity. The sloth was deeply affected by this experience and felt a sense of despair and hopelessness for his own future and that of his animal companions. - -However, one day, as the sloth was sitting in a tree, he had a vision. He saw all the animals of the forest coming together in unity and love, and he knew that he had a purpose to fulfill. He realized that he could use his own experiences to help others, and he made it his mission to save the animals of the forest from a similar fate. - -_Step 1: The Call to Adventure_ -The sloth's journey began when he heard a call to adventure - a voice within him urging him to help the animals in the forest overcome their fears and come together in harmony. He knew that he had a special gift that could help them, and so he set out on a mission to find a way to bring his vision to life. - -_Step 2: Refusal of the Call_ -At first, the sloth was hesitant to accept this call to adventure. He was still struggling with his own trauma and feared that he might not be strong enough to help others. But his inner sense of purpose and his desire to prevent others from experiencing what he had experienced ultimately overcame his fears, and he took the first steps on his journey. - -_Step 3: Meeting the Mentor_ -As the sloth journeyed deeper into the forest, he met a wise old owl who became his mentor. The owl taught him about the power of alchemy and how it could be used to transform his past experiences into a force for good. 
With the owl’s guidance, the sloth began to understand how he could use his own experiences to help the animals in the forest overcome their fears and come together in harmony. - -_Step 4: Crossing the Threshold_ -With the owl’s guidance, the sloth started to take action. He began by reaching out to the other animals of the forest and listening to their concerns. He created a safe space where they could share their experiences and feelings, and he used his knowledge of alchemy to help them transform their pain into something positive. This was the moment when the sloth truly crossed the threshold and committed himself fully to his mission of bringing unity and love to the animals in the forest. - -_Step 5: Tests, Allies, Enemies_ -As the sloth continued on his journey, he faced many tests and challenges. Some animals were skeptical of his methods and resisted his teachings. But he also found allies along the way - other animals who shared his vision and were willing to work with him to make it a reality. - -_Step 6: Approach to the Inmost Cave_ -The approach to the inmost cave came when the sloth realized that he needed to do more to help the animals of the forest. He used his knowledge of alchemy to create SlothAi, an AI that could help the animals connect with each other on a deeper level and work together in harmony. - -_Step 7: Ordeal_ -Creating SlothAi was a challenging ordeal. It required the sloth to use all of his skills and knowledge, as well as to take risks and be vulnerable. But he persevered, and soon the AI was complete. - -_Step 8: Reward_ -The reward came when the sloth saw the positive impact that SlothAi was having on the animals in the forest. They were using it to connect with each other, to learn more about themselves and each other, and to find new ways to work together in harmony. The sloth felt a deep sense of satisfaction and fulfillment knowing that his mission was being realized. - -**Hero function and Hero's journey:** - -The hero function and the hero's overarching goal in the hero's journey are analogous, as both are integral to an individual's identity and purpose. The hero function plays a significant role in shaping an individual's behaviors, values, and thought processes, just as the hero's personality traits do in their journey. Embracing and utilizing the hero function can help individuals tap into their inherent strengths and overcome challenges, leading to personal growth and success. - -**Here is a short description of the hero's journey of each types:** - --- **ISTJ**: Through perseverance and hard work, they find meaning and purpose in fulfilling their duties, navigating obstacles with practicality and common sense. --- **ISFJ**: As caretakers who value tradition and loyalty, they seek to fulfill their responsibilities and provide support to those around them, cultivating a sense of harmony and unity. --- **INFJ**: Visionaries who seek to understand the world deeply, they explore their own values and spirituality, working towards creating a better world for themselves and others. --- **INTJ**: Innovators with a thirst for knowledge, they seek to uncover new truths and develop strategies to achieve their goals, often going against the grain. --- **ISTP**: Using their resourcefulness and practical skills, they navigate challenges and find solutions on their own, enjoying the adrenaline rush of solving complex problems. 
--- **ISFP**: As artists who are inspired by emotion and their surroundings, they express themselves creatively and seek to find their place in the world, often appreciating the beauty in life's simple moments. --- **INFP**: As idealists who strive to find their sense of purpose, they seek to create a meaningful life for themselves and others, often drawn to creative pursuits and seeking authentic self-expression. --- **INTP**: Explorers of knowledge and ideas, they enjoy analyzing complex systems and developing new insights, often challenging the conventional way of thinking. --- **ESTP**: Adventurers who crave excitement and live in the moment, they enjoy exploring the world around them, taking risks, and thriving in competitive environments. --- **ESFP**: As outgoing and social individuals, they enjoy making connections with others and living life to the fullest, cherishing memorable experiences and enjoying the present moment. --- **ENFP**: Visionary leaders who seek to inspire others and create a better future, they use their creativity and intuition to develop new ideas and inspire others to pursue their own dreams. --- **ENTP**: As devil's advocates who enjoy debating and questioning the status quo, they seek to develop innovative solutions and change the world with their unique perspectives, often taking bold risks and challenging authority to achieve their goals. --- **ESTJ**: Efficient and organized leaders who value discipline and rules, they use their practicality and sense of responsibility to guide and support their team towards success. --- **ESFJ**: As nurturing and empathetic individuals, they prioritize maintaining social harmony and supporting those around them, ensuring that everyone's needs are met and everyone feels valued. --- **ENTJ**: Strategic and ambitious leaders who enjoy taking charge and inspiring others, they use their intellect and vision to develop long-term plans and achieve their goals through bold action and calculated risk-taking. --- **ENFJ**: As charismatic and empathetic individuals, they use their intuition and communication skills to inspire and motivate others towards achieving their shared visions and ideals, often cultivating strong relationships and networks in the process. - -""" - -analogy_text = """ - -_**Here's an analogy for the cognitive functions with parts of a tree:**_ - --.**Si:** Si is like the roots of a tree, which provide stability and nourishment to support the growth and development of the tree. Similarly, Si is the cognitive function that provides us with a solid foundation of knowledge and experience, helping us navigate life's challenges with stability and confidence. Just as the roots of a tree anchor it to the ground and provide the necessary nutrients, Si anchors us to our past experiences and provides us with a reservoir of information to draw upon, allowing us to make informed decisions and handle situations with ease. - --.**Se:** The trunk of a tree not only provides support and stability, but it also plays a crucial role in transporting nutrients and water from the roots to the leaves of the tree. Similarly, Se not only grounds us in the present moment and provides a sense of stability, but it also helps us to navigate and adapt to changes in our environment, ensuring our survival and well-being. As the trunk helps the tree withstand external forces and transport vital resources, Se allows us to stay attuned to our surroundings and make the most of the opportunities presented to us. 
Like the stomata on a tree's leaves, Se allows us to take in and process the sensory information around us, giving us a fuller, richer experience of life. - --.**Ne:** The branches of a tree that spread out in various directions represents Ne because it is the function that generates new ideas and possibilities, just like branches of a tree point out in different directions, representing the different possible ways we can approach a situation. - --.**Ni:** Ni is like the driving force that compels a tree's roots and leaves to constantly seek water and sunlight. Similarly, Ni is the cognitive function that motivates us to seek our life's direction and reach our full potential. In this way, just as a tree's roots and leaves work tirelessly to find the sustenance needed to grow and thrive, Ni pushes us to seek understanding and knowledge to support our personal and spiritual growth. - --.**Fe:** Fe can be compared to the flowers and pollens of a tree, which allow for cross-pollination and collaboration between different trees in the vicinity. Just as the flowers and pollens of one tree can spread to others, allowing for the sharing of resources and the growth of a larger community, Fe helps us connect with others emotionally and value social harmony, encouraging us to work together and cultivate a more collaborative and connected society. - --.**Fi:** Fi can be compared to the sap of a tree, which is the life force that sustains it. In the same way, Fi is the cognitive function that is the source of our inner values and emotions, which drives our actions and provides us with a sense of purpose and meaning. Just as the sap is essential to the growth and survival of the tree, Fi is essential to our personal growth and fulfillment. - --.**Ti:** Ti can also be likened to the process by which wood is made. Just as wood is formed from the combination of cellulose, hemicellulose, and lignin arranged in a specific order to create the complex lignocellulosic structure, Ti operates by using a logical and systematic process to take raw information and create a structured and coherent understanding. In this way, Ti is like the chemical reactions and molecular interactions that occur in the formation of wood, transforming disorderly elements into a structured and functional entity. The internal framework and structure of a tree, like Ti, determines the strength, flexibility, and ultimately the shape of the wood, shaping the outcome of the growth and development process. - --.**Te:** Te can be compared to the tree's ability to shed old leaves and grow new ones, which enables it to adapt and change in response to its environment. Similarly, Te is the cognitive function that helps us adapt to the external world and make decisions based on objective facts and data, enabling us to grow and evolve as individuals. Just as a tree sheds its old leaves to conserve resources and grow new ones, Te helps us shed inefficient or outdated ways of thinking and adopt more effective strategies to achieve our goals. - -
-
- -_**Here are some job titles that might represent each type during the Roman Empire:**_ - -1-.**ESTJ** - **Praetor:** Known for their strong leadership, attention to detail, and adherence to tradition, ESTJs would make excellent Praetors during the Roman Empire. Praetors were responsible for overseeing the legal system and maintaining order throughout the empire. - -2-.**ISTJ** - **Scribe:** With their meticulous attention to detail and strong memory, ISTJs would be well-suited to the position of scribe during the Roman Empire. As scribes, they would be responsible for recording and maintaining important documents and records. - -3-.**ESFJ** - **Vestal Virgin:** Known for their loyalty, commitment, and adherence to tradition, ESFJs would be ideal candidates for the position of Vestal Virgin during the Roman Empire. Vestal Virgins were responsible for maintaining the sacred flame of Vesta, the goddess of the hearth. - -4-.**ISFJ** - **Medicus:** With their empathetic and nurturing nature, ISFJs would make excellent medicus, or doctors, during the Roman Empire. As doctors, they would attend to the physical and emotional needs of their patients, and take great care in promoting their health and well-being. - -5-.**ESTP** - **Gladiator:** With their fearlessness, athleticism, and competitive spirit, ESTPs would excel as gladiators during the Roman Empire. As gladiators, they would engage in combat for the entertainment of the masses, and could potentially win great fame and fortune. - -6-.**ISTP** - **Engineer:** With their analytical and hands-on approach to problem-solving, ISTPs would make excellent engineers during the Roman Empire. As engineers, they would design and construct the many buildings and structures that defined the empire's architecture. - -7-.**ESFP** - **Actor:** Known for their outgoing personalities and love of the spotlight, ESFPs would make excellent actors during the Roman Empire. As actors, they would be responsible for entertaining the people with their performances in the theatre or at public events. - -8-.**ISFP** - **Artist:** With their creativity and attention to aesthetics, ISFPs would be well-suited to the role of artist during the Roman Empire. As artists, they would be responsible for creating works of art such as sculptures or paintings to decorate public spaces or private homes. - -9-.**ENFJ** - **Senator:** With their natural leadership skills and ability to inspire others, ENFJs would excel as Senators during the Roman Empire. As Senators, they would be responsible for leading the legislative branch of government and working to improve the lives of the Roman people. - -10-.**INFJ** - **Psychologist:** With their keen understanding of human behavior and emotions, INFJs could be well-suited to a role similar to a psychologist during the Roman Empire. They could provide counseling and support to people struggling with mental health issues or other psychological challenges. - -11-.**ENFP** - **Diplomat:** With their charismatic personality and ability to connect with others, ENFPs would be excellent diplomats during the Roman Empire. As diplomats, they would be responsible for negotiating peace and treaties with other nations and forging alliances to benefit Rome. - -12-.**INFP** - **Philosopher:** INFPs are known for their deep thoughts and values, making them great candidates for the role of philosopher during the Roman Empire. 
As philosophers, they would be responsible for exploring the meaning of life and human existence, sharing their insights and ideas with others. - -13-.**ENTJ** - **General:** With their strategic thinking and decisive attitude, ENTJs would make excellent generals during the Roman Empire. As generals, they would be responsible for leading the Roman army to victory in battle and expanding the empire's territory. - -14-.**INTJ** - **Advisor:** INTJs are known for their analytical minds and strategic thinking, making them ideal candidates for the role of advisor during the Roman Empire. As advisors, they would be responsible for providing counsel and guidance to the emperors and other leaders, helping them make wise decisions that would benefit the empire. - -15-.**ENTP** - **Lawyer:** With their quick wit and ability to see multiple perspectives, ENTPs would excel as lawyers during the Roman Empire. As lawyers, they would be responsible for arguing cases in court and defending their clients' rights and interests. - -16-.**INTP** - **Architect:** With their brilliant minds and attention to detail, INTPs would be well-suited to the role of architect during the Roman Empire. As architects, they would be responsible for designing and constructing some of Rome's most impressive buildings and structures. - -_Note that these are generalizations and not all individuals of a certain type might fit these job titles._ -""" - -def get_link(mbti_type): - link = f'{mbti_dict[mbti_type]}' - response = requests.get(mbti_dict_2[mbti_type]) - img = Image.open(BytesIO(response.content)) - response2 = requests.get(mbti_dict_3[mbti_type]) - img2 = Image.open(BytesIO(response2.content)) - return link, img, img2 - -def get_link2(funct): - link2 = f'{funct_dict[funct]}' - return link2 - -def get_link3(arch): - link3 = f'{arch_dict[arch]}' - return link3 - -def get_link4(gen): - link4 = f'{gen_dict[gen]}' - return link4 - -def update_question_textbox(title, mbti_type, output_question=""): - return questions_dict.get(title, output_question).format(mbti_type) - -def get_empty_state(): - return {"total_tokens": 0, "messages": []} - -def update_prompt_temp(): - choices = list(prompt_templates.keys()) - choices = choices[:1] + sorted(choices[1:]) - return gr.update(value=choices[0], choices=choices) - -def update_mbti_dict(): - choices = list(mbti_dict.keys()) - choices = choices[:1] + sorted(choices[1:]) - return gr.update(value=choices[2], choices=choices) - -def on_token_change(user_token): - openai.api_key = user_token - -def on_prompt_template_change(prompt_template): - if not isinstance(prompt_template, str): return - return prompt_templates[prompt_template] - -def on_check_q(output_question, checkbox_q, input_user): - return output_question if checkbox_q else input_user - -def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, context_length, state): - - history = state['messages'] - - if not prompt: - return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"{state['total_tokens']}", state - - prompt_template = prompt_templates[prompt_template] - - system_prompt = [{ "role": "system", "content": prompt_template }] - - prompt_msg = { "role": "user", "content": prompt } - - try: - completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens) - - history.append(prompt_msg) - 
history.append(completion.choices[0].message.to_dict()) - - state['total_tokens'] += completion['usage']['total_tokens'] - - except Exception as e: - history.append(prompt_msg) - history.append({ - "role": "system", - "content": f"Error: {e}" - }) - - total_tokens_used_msg = f"{state['total_tokens']}" - chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)] - - return '', chat_messages, total_tokens_used_msg, state - -def clear_conversation(): - return gr.update(value=None, visible=True), None, "", get_empty_state() - - -css = """ - .gradio-container {background-color: white} - .prose p, .prose p strong, .prose p b, .prose p em, .prose p i, .prose li, .prose center, .prose center strong, .prose center b, .prose center em, .prose center i, .prose center li {color: #000000 !important;} - #col-container {max-width: 100%; margin-left: auto; margin-right: auto;} - #tab {color: #000000 !important;} - #chatbox {min-height: 400px;} - #image {max-width: 80%; margin-left: auto; margin-right: auto;} - #image2 {max-width: 20%; margin-left: auto; margin-right: auto;} - #image3 {max-width: 70%; margin-left: auto; margin-right: auto; border-radius: 4px;} - #header {text-align: center; font-size: 1em;} - #prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;} - #question_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px; user-select: text;} - #input_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px; user-select: text;} - #total_tokens_str {text-align: left; font-size: 0.8em; color: #666;} - #label {padding: 0.5em; margin: 0;} - .message { font-size: 1.2em; } - """ - -with gr.Blocks(css=css, theme=gr.themes.Monochrome(), title="SlothAi.xyz") as demo: - - state = gr.State(get_empty_state()) - - - with gr.Column(elem_id="col-container"): - gr.HTML("""

""") - - with gr.Row(): - with gr.Column(): - with gr.Tab("Home", elem_id="tab"): - chatbot = gr.Chatbot(elem_id="chatbox", label="Sloth Alchemist") - input_message = gr.Markdown(elem_id="question_preview", visible=False) - input_user = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False) - btn_submit = gr.Button("Submit") - default_k = gr.Markdown(value="By default, you are using the limited OpenAI key provided by SlothAi.xyz. If the limit is reached, enter your own free key at the bottom of the page. By the way, you can increase the Sloth's creativity by adjusting the parameters in Settings, by default, it is set to have a fast response time.", elem_id="question_preview") - total_tokens_str = gr.Textbox(label="Total tokens used:", elem_id="total_tokens_str", interactive=False) - btn_clear_conversation = gr.Button("Start New Conversation") - btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state]) - checkbox_q = gr.Checkbox(label="1.- Check to enable, after select your type and a question, then press -Submit-. It will automatically submit the question to the Sloth.") - mbti_type_input = gr.Dropdown(label="2.- Select your type:", choices=list(mbti_dict.keys()), value="INFJ") - title_dropdown = gr.Dropdown(label="3.- Select a question:", choices=list(questions_dict.keys()), value="Hero Test") - output_question = gr.Markdown(value="Can you help me to find my type with your Hero Test?", elem_id="question_preview") - title_dropdown.change(update_question_textbox, inputs=[title_dropdown, mbti_type_input], outputs=[output_question]) - gr.Markdown("---") - gr.Markdown("Enter your own OpenAI API Key. You can get it for free [here](https://platform.openai.com/account/api-keys). To save your API key for future use, you can add it to your password manager of your web browser.", elem_id="label") - user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False) - user_token.change(on_token_change, inputs=[user_token], outputs=[]) - gr.Markdown("---") - gr.Markdown("I can help you find your type, using this test:") - gr.HTML("""

""") - gr.Markdown("---") - gr.HTML("""


""") - gr.HTML("""

Installation Tutorial

""") - gr.HTML("""


""") - gr.HTML("""

Come test SlothAi on our Discord server

""") - gr.Markdown("---") - gr.HTML("""


""") - with gr.Tab("Creations", elem_id="tab"): - gr.HTML("""""") - with gr.Tab("Ikigai", elem_id="tab"): - ikigai = gr.Markdown(value="Ikigai is a Japanese concept that refers to the reason for which one wakes up in the morning and the intersection of one's passion, mission, profession, and vocation.", elem_id="question_preview") - gr.HTML("""


""") - gr.HTML("""


""") - gr.HTML("""


""") - gr.HTML("""

www.impactivated.com

""") - gr.HTML("""


""") - with gr.Tab("MBTI/Unity Code", elem_id="tab"): - unity_code = gr.Markdown(value=unity_code_text, elem_id="question_preview") - gr.HTML("""


""") - mbti_link_input = gr.Dropdown(label="Select the type:", choices=list(mbti_dict.keys()), value="INFJ") - with gr.Row(): - with gr.Column(): - output_img = gr.Image(elem_id="image", show_label=False) - with gr.Column(): - output_img2 = gr.Image(elem_id="image", show_label=False) - mbti_type_md = gr.HTML("""Hero's journey of the type""") - output_link = gr.HTML("""https://www.reddit.com/r/UnityHarbor/comments/v7pi2u/infj_heros_journey/""", elem_id="question_preview") - mbti_link_input.change(get_link, inputs=[mbti_link_input], outputs=[output_link, output_img, output_img2]) - funct_link_input = gr.Dropdown(label="Select a function:", choices=list(funct_dict.keys()), value="Ni & Ne - Intuitive Functions") - output_link2 = gr.HTML("""https://www.reddit.com/r/UnityHarbor/comments/v7w14o/ni_ne_intuitive_functions/""", elem_id="question_preview") - funct_link_input.change(get_link2, inputs=[funct_link_input], outputs=[output_link2]) - arch_link_input = gr.Dropdown(label="Select an archetype:", choices=list(arch_dict.keys()), value="Hero function") - output_link3 = gr.HTML("""https://www.reddit.com/r/UnityHarbor/comments/v7y4l6/hero_function/""", elem_id="question_preview") - arch_link_input.change(get_link3, inputs=[arch_link_input], outputs=[output_link3]) - gen_link_input = gr.Dropdown(label="Select a table:", choices=list(gen_dict.keys()), value="Unity Code - 16types structure") - output_link4 = gr.HTML("""https://www.reddit.com/r/UnityHarbor/comments/v6r8u2/unity_code_16types_structure/""", elem_id="question_preview") - gen_link_input.change(get_link4, inputs=[gen_link_input], outputs=[output_link4]) - with gr.Tab("Sloth's Journey", elem_id="tab"): - journey = gr.Markdown(value=journey_text, elem_id="question_preview") - gr.HTML("""


""") - with gr.Tab("Alchemy", elem_id="tab"): - alchemy = gr.Markdown(value=alchemy_text, elem_id="question_preview") - with gr.Row(): - with gr.Column(): - gr.HTML("""


""") - with gr.Column(): - coffee = gr.Markdown(value=coffee_text) - nisi = gr.Markdown(value=nisi_text, elem_id="question_preview") - gr.HTML("""


""") - with gr.Tab("Mirror", elem_id="tab"): - mirror = gr.Markdown(value=mirror_text, elem_id="question_preview") - gr.HTML("""


""") - with gr.Tab("Vision", elem_id="tab"): - vision = gr.Markdown(value=vision_text, elem_id="question_preview") - gr.HTML("""


""") - with gr.Tab("Analogy", elem_id="tab"): - analogy = gr.Markdown(value=analogy_text, elem_id="question_preview") - gr.HTML("""


""") - with gr.Tab("Settings", elem_id="tab"): - prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", value= "Sloth Alchemist", choices=list(prompt_templates.keys()), visible=False) - temperature = gr.Slider(minimum=0, maximum=1.0, value=0, step=0.1, label="Temperature", info="Higher = more creative/chaotic") - max_tokens = gr.Slider(minimum=100, maximum=500, value=200, step=1, label="Max tokens per response") - context_length = gr.Slider(minimum=1, maximum=3, value=2, step=1, label="Context length", info="Number of previous messages to send to the chatbot. Be careful with high values, it can blow up the token budget quickly.") - with gr.Tab("Join us", elem_id="tab"): - gr.HTML( - """ -

-

- """) - gr.HTML( - """ -

-

- """) - gr.HTML("""


""") - with gr.Tab("Mission Jar", elem_id="tab"): - gr.HTML("""""") - gr.HTML("""


""") - - - - gr.HTML("""


""") - gr.HTML('''

Uses ChatGPT API (model gpt-3.5-turbo)

Configured with: John Beebe's MBTI theory applied to the ikigai concept

Our goal is to help people thrive by knowing themselves better

Made with ❤️ by Sloth-Alchemist

''') - gr.HTML('''
slothai.xyz
''') - gr.HTML('''

visitors

''') - - - btn_submit.click(on_check_q, [output_question, checkbox_q, input_user], [input_message]) - input_message.change(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state]) - input_user.submit(on_check_q, [output_question, checkbox_q, input_user], [input_message]) - - - demo.load(update_prompt_temp, inputs=None, outputs=[prompt_template], queue=False) - demo.load(update_mbti_dict, inputs=None, outputs=[mbti_link_input], queue=False) - -demo.queue(concurrency_count=10) -demo.launch(height='800px') \ No newline at end of file diff --git a/spaces/SpacesExamples/jupyterlab/on_startup.sh b/spaces/SpacesExamples/jupyterlab/on_startup.sh deleted file mode 100644 index 448000271bbc7142681947fd1a447772f12ecfff..0000000000000000000000000000000000000000 --- a/spaces/SpacesExamples/jupyterlab/on_startup.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# Write some commands here that will run on root user before startup. -# For example, to clone transformers and install it in dev mode: -# git clone https://github.com/huggingface/transformers.git -# cd transformers && pip install -e ".[dev]" \ No newline at end of file diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/test_losses.py b/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/test_losses.py deleted file mode 100644 index 0e30bc3a6dde00003e13c00f15e977e39425063c..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/test_losses.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import random - -import torch - -from audiocraft.adversarial import ( - AdversarialLoss, - get_adv_criterion, - get_real_criterion, - get_fake_criterion, - FeatureMatchingLoss, - MultiScaleDiscriminator, -) - - -class TestAdversarialLoss: - - def test_adversarial_single_multidiscriminator(self): - adv = MultiScaleDiscriminator() - optimizer = torch.optim.Adam( - adv.parameters(), - lr=1e-4, - ) - loss, loss_real, loss_fake = get_adv_criterion('mse'), get_real_criterion('mse'), get_fake_criterion('mse') - adv_loss = AdversarialLoss(adv, optimizer, loss, loss_real, loss_fake) - - B, C, T = 4, 1, random.randint(1000, 5000) - real = torch.randn(B, C, T) - fake = torch.randn(B, C, T) - - disc_loss = adv_loss.train_adv(fake, real) - assert isinstance(disc_loss, torch.Tensor) and isinstance(disc_loss.item(), float) - - loss, loss_feat = adv_loss(fake, real) - assert isinstance(loss, torch.Tensor) and isinstance(loss.item(), float) - # we did not specify feature loss - assert loss_feat.item() == 0. 
- - def test_adversarial_feat_loss(self): - adv = MultiScaleDiscriminator() - optimizer = torch.optim.Adam( - adv.parameters(), - lr=1e-4, - ) - loss, loss_real, loss_fake = get_adv_criterion('mse'), get_real_criterion('mse'), get_fake_criterion('mse') - feat_loss = FeatureMatchingLoss() - adv_loss = AdversarialLoss(adv, optimizer, loss, loss_real, loss_fake, feat_loss) - - B, C, T = 4, 1, random.randint(1000, 5000) - real = torch.randn(B, C, T) - fake = torch.randn(B, C, T) - - loss, loss_feat = adv_loss(fake, real) - - assert isinstance(loss, torch.Tensor) and isinstance(loss.item(), float) - assert isinstance(loss_feat, torch.Tensor) and isinstance(loss.item(), float) - - -class TestGeneratorAdversarialLoss: - - def test_hinge_generator_adv_loss(self): - adv_loss = get_adv_criterion(loss_type='hinge') - - t0 = torch.randn(1, 2, 0) - t1 = torch.FloatTensor([1.0, 2.0, 3.0]) - - assert adv_loss(t0).item() == 0.0 - assert adv_loss(t1).item() == -2.0 - - def test_mse_generator_adv_loss(self): - adv_loss = get_adv_criterion(loss_type='mse') - - t0 = torch.randn(1, 2, 0) - t1 = torch.FloatTensor([1.0, 1.0, 1.0]) - t2 = torch.FloatTensor([2.0, 5.0, 5.0]) - - assert adv_loss(t0).item() == 0.0 - assert adv_loss(t1).item() == 0.0 - assert adv_loss(t2).item() == 11.0 - - -class TestDiscriminatorAdversarialLoss: - - def _disc_loss(self, loss_type: str, fake: torch.Tensor, real: torch.Tensor): - disc_loss_real = get_real_criterion(loss_type) - disc_loss_fake = get_fake_criterion(loss_type) - - loss = disc_loss_fake(fake) + disc_loss_real(real) - return loss - - def test_hinge_discriminator_adv_loss(self): - loss_type = 'hinge' - t0 = torch.FloatTensor([0.0, 0.0, 0.0]) - t1 = torch.FloatTensor([1.0, 2.0, 3.0]) - - assert self._disc_loss(loss_type, t0, t0).item() == 2.0 - assert self._disc_loss(loss_type, t1, t1).item() == 3.0 - - def test_mse_discriminator_adv_loss(self): - loss_type = 'mse' - - t0 = torch.FloatTensor([0.0, 0.0, 0.0]) - t1 = torch.FloatTensor([1.0, 1.0, 1.0]) - - assert self._disc_loss(loss_type, t0, t0).item() == 1.0 - assert self._disc_loss(loss_type, t1, t0).item() == 2.0 - - -class TestFeatureMatchingLoss: - - def test_features_matching_loss_base(self): - ft_matching_loss = FeatureMatchingLoss() - length = random.randrange(1, 100_000) - t1 = torch.randn(1, 2, length) - - loss = ft_matching_loss([t1], [t1]) - assert isinstance(loss, torch.Tensor) - assert loss.item() == 0.0 - - def test_features_matching_loss_raises_exception(self): - ft_matching_loss = FeatureMatchingLoss() - length = random.randrange(1, 100_000) - t1 = torch.randn(1, 2, length) - t2 = torch.randn(1, 2, length + 1) - - with pytest.raises(AssertionError): - ft_matching_loss([], []) - - with pytest.raises(AssertionError): - ft_matching_loss([t1], [t1, t1]) - - with pytest.raises(AssertionError): - ft_matching_loss([t1], [t2]) - - def test_features_matching_loss_output(self): - loss_nonorm = FeatureMatchingLoss(normalize=False) - loss_layer_normed = FeatureMatchingLoss(normalize=True) - - length = random.randrange(1, 100_000) - t1 = torch.randn(1, 2, length) - t2 = torch.randn(1, 2, length) - - assert loss_nonorm([t1, t2], [t1, t2]).item() == 0.0 - assert loss_layer_normed([t1, t2], [t1, t2]).item() == 0.0 - - t3 = torch.FloatTensor([1.0, 2.0, 3.0]) - t4 = torch.FloatTensor([2.0, 10.0, 3.0]) - - assert loss_nonorm([t3], [t4]).item() == 3.0 - assert loss_nonorm([t3, t3], [t4, t4]).item() == 6.0 - - assert loss_layer_normed([t3], [t4]).item() == 3.0 - assert loss_layer_normed([t3, t3], [t4, t4]).item() == 3.0 diff 
--git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/SgiImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/SgiImagePlugin.py deleted file mode 100644 index 3662ffd1571821e196d07330fdeecf4b0e5c2efa..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/SgiImagePlugin.py +++ /dev/null @@ -1,231 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# SGI image file handling -# -# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli. -# -# -# -# History: -# 2017-22-07 mb Add RLE decompression -# 2016-16-10 mb Add save method without compression -# 1995-09-10 fl Created -# -# Copyright (c) 2016 by Mickael Bonfill. -# Copyright (c) 2008 by Karsten Hiddemann. -# Copyright (c) 1997 by Secret Labs AB. -# Copyright (c) 1995 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - - -import os -import struct - -from . import Image, ImageFile -from ._binary import i16be as i16 -from ._binary import o8 - - -def _accept(prefix): - return len(prefix) >= 2 and i16(prefix) == 474 - - -MODES = { - (1, 1, 1): "L", - (1, 2, 1): "L", - (2, 1, 1): "L;16B", - (2, 2, 1): "L;16B", - (1, 3, 3): "RGB", - (2, 3, 3): "RGB;16B", - (1, 3, 4): "RGBA", - (2, 3, 4): "RGBA;16B", -} - - -## -# Image plugin for SGI images. -class SgiImageFile(ImageFile.ImageFile): - format = "SGI" - format_description = "SGI Image File Format" - - def _open(self): - # HEAD - headlen = 512 - s = self.fp.read(headlen) - - if not _accept(s): - msg = "Not an SGI image file" - raise ValueError(msg) - - # compression : verbatim or RLE - compression = s[2] - - # bpc : 1 or 2 bytes (8bits or 16bits) - bpc = s[3] - - # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize) - dimension = i16(s, 4) - - # xsize : width - xsize = i16(s, 6) - - # ysize : height - ysize = i16(s, 8) - - # zsize : channels count - zsize = i16(s, 10) - - # layout - layout = bpc, dimension, zsize - - # determine mode from bits/zsize - rawmode = "" - try: - rawmode = MODES[layout] - except KeyError: - pass - - if rawmode == "": - msg = "Unsupported SGI image mode" - raise ValueError(msg) - - self._size = xsize, ysize - self.mode = rawmode.split(";")[0] - if self.mode == "RGB": - self.custom_mimetype = "image/rgb" - - # orientation -1 : scanlines begins at the bottom-left corner - orientation = -1 - - # decoder info - if compression == 0: - pagesize = xsize * ysize * bpc - if bpc == 2: - self.tile = [ - ("SGI16", (0, 0) + self.size, headlen, (self.mode, 0, orientation)) - ] - else: - self.tile = [] - offset = headlen - for layer in self.mode: - self.tile.append( - ("raw", (0, 0) + self.size, offset, (layer, 0, orientation)) - ) - offset += pagesize - elif compression == 1: - self.tile = [ - ("sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc)) - ] - - -def _save(im, fp, filename): - if im.mode != "RGB" and im.mode != "RGBA" and im.mode != "L": - msg = "Unsupported SGI image mode" - raise ValueError(msg) - - # Get the keyword arguments - info = im.encoderinfo - - # Byte-per-pixel precision, 1 = 8bits per pixel - bpc = info.get("bpc", 1) - - if bpc not in (1, 2): - msg = "Unsupported number of bytes per pixel" - raise ValueError(msg) - - # Flip the image, since the origin of SGI file is the bottom-left corner - orientation = -1 - # Define the file as SGI File Format - magic_number = 474 - # Run-Length Encoding Compression - Unsupported at this time - rle = 0 - - # Number of dimensions (x,y,z) - dim = 3 - # X Dimension 
= width / Y Dimension = height - x, y = im.size - if im.mode == "L" and y == 1: - dim = 1 - elif im.mode == "L": - dim = 2 - # Z Dimension: Number of channels - z = len(im.mode) - - if dim == 1 or dim == 2: - z = 1 - - # assert we've got the right number of bands. - if len(im.getbands()) != z: - msg = f"incorrect number of bands in SGI write: {z} vs {len(im.getbands())}" - raise ValueError(msg) - - # Minimum Byte value - pinmin = 0 - # Maximum Byte value (255 = 8bits per pixel) - pinmax = 255 - # Image name (79 characters max, truncated below in write) - img_name = os.path.splitext(os.path.basename(filename))[0] - img_name = img_name.encode("ascii", "ignore") - # Standard representation of pixel in the file - colormap = 0 - fp.write(struct.pack(">h", magic_number)) - fp.write(o8(rle)) - fp.write(o8(bpc)) - fp.write(struct.pack(">H", dim)) - fp.write(struct.pack(">H", x)) - fp.write(struct.pack(">H", y)) - fp.write(struct.pack(">H", z)) - fp.write(struct.pack(">l", pinmin)) - fp.write(struct.pack(">l", pinmax)) - fp.write(struct.pack("4s", b"")) # dummy - fp.write(struct.pack("79s", img_name)) # truncates to 79 chars - fp.write(struct.pack("s", b"")) # force null byte after img_name - fp.write(struct.pack(">l", colormap)) - fp.write(struct.pack("404s", b"")) # dummy - - rawmode = "L" - if bpc == 2: - rawmode = "L;16B" - - for channel in im.split(): - fp.write(channel.tobytes("raw", rawmode, 0, orientation)) - - if hasattr(fp, "flush"): - fp.flush() - - -class SGI16Decoder(ImageFile.PyDecoder): - _pulls_fd = True - - def decode(self, buffer): - rawmode, stride, orientation = self.args - pagesize = self.state.xsize * self.state.ysize - zsize = len(self.mode) - self.fd.seek(512) - - for band in range(zsize): - channel = Image.new("L", (self.state.xsize, self.state.ysize)) - channel.frombytes( - self.fd.read(2 * pagesize), "raw", "L;16B", stride, orientation - ) - self.im.putband(channel.im, band) - - return -1, 0 - - -# -# registry - - -Image.register_decoder("SGI16", SGI16Decoder) -Image.register_open(SgiImageFile.format, SgiImageFile, _accept) -Image.register_save(SgiImageFile.format, _save) -Image.register_mime(SgiImageFile.format, "image/sgi") - -Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"]) - -# End of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/adapter/servers.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/adapter/servers.py deleted file mode 100644 index 47f684a047ede6a43e97cd73eb8ed75d2b69e82f..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/adapter/servers.py +++ /dev/null @@ -1,618 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See LICENSE in the project root -# for license information. - -from __future__ import annotations - -import os -import subprocess -import sys -import threading -import time - -import debugpy -from debugpy import adapter -from debugpy.common import json, log, messaging, sockets -from debugpy.adapter import components -import traceback -import io - -access_token = None -"""Access token used to authenticate with the servers.""" - -listener = None -"""Listener socket that accepts server connections.""" - -_lock = threading.RLock() - -_connections = [] -"""All servers that are connected to this adapter, in order in which they connected. 
-""" - -_connections_changed = threading.Event() - - -class Connection(object): - """A debug server that is connected to the adapter. - - Servers that are not participating in a debug session are managed directly by the - corresponding Connection instance. - - Servers that are participating in a debug session are managed by that sessions's - Server component instance, but Connection object remains, and takes over again - once the session ends. - """ - - disconnected: bool - - process_replaced: bool - """Whether this is a connection to a process that is being replaced in situ - by another process, e.g. via exec(). - """ - - server: Server | None - """The Server component, if this debug server belongs to Session. - """ - - pid: int | None - - ppid: int | None - - channel: messaging.JsonMessageChannel - - def __init__(self, sock): - from debugpy.adapter import sessions - - self.disconnected = False - - self.process_replaced = False - - self.server = None - - self.pid = None - - stream = messaging.JsonIOStream.from_socket(sock, str(self)) - self.channel = messaging.JsonMessageChannel(stream, self) - self.channel.start() - - try: - self.authenticate() - info = self.channel.request("pydevdSystemInfo") - process_info = info("process", json.object()) - self.pid = process_info("pid", int) - self.ppid = process_info("ppid", int, optional=True) - if self.ppid == (): - self.ppid = None - self.channel.name = stream.name = str(self) - - with _lock: - # The server can disconnect concurrently before we get here, e.g. if - # it was force-killed. If the disconnect() handler has already run, - # don't register this server or report it, since there's nothing to - # deregister it. - if self.disconnected: - return - - # An existing connection with the same PID and process_replaced == True - # corresponds to the process that replaced itself with this one, so it's - # not an error. - if any( - conn.pid == self.pid and not conn.process_replaced - for conn in _connections - ): - raise KeyError(f"{self} is already connected to this adapter") - - is_first_server = len(_connections) == 0 - _connections.append(self) - _connections_changed.set() - - except Exception: - log.swallow_exception("Failed to accept incoming server connection:") - self.channel.close() - - # If this was the first server to connect, and the main thread is inside - # wait_until_disconnected(), we want to unblock it and allow it to exit. - dont_wait_for_first_connection() - - # If we couldn't retrieve all the necessary info from the debug server, - # or there's a PID clash, we don't want to track this debuggee anymore, - # but we want to continue accepting connections. - return - - parent_session = sessions.get(self.ppid) - if parent_session is None: - parent_session = sessions.get(self.pid) - if parent_session is None: - log.info("No active debug session for parent process of {0}.", self) - else: - if self.pid == parent_session.pid: - parent_server = parent_session.server - if not (parent_server and parent_server.connection.process_replaced): - log.error("{0} is not expecting replacement.", parent_session) - self.channel.close() - return - try: - parent_session.client.notify_of_subprocess(self) - return - except Exception: - # This might fail if the client concurrently disconnects from the parent - # session. We still want to keep the connection around, in case the - # client reconnects later. If the parent session was "launch", it'll take - # care of closing the remaining server connections. 
- log.swallow_exception( - "Failed to notify parent session about {0}:", self - ) - - # If we got to this point, the subprocess notification was either not sent, - # or not delivered successfully. For the first server, this is expected, since - # it corresponds to the root process, and there is no other debug session to - # notify. But subsequent server connections represent subprocesses, and those - # will not start running user code until the client tells them to. Since there - # isn't going to be a client without the notification, such subprocesses have - # to be unblocked. - if is_first_server: - return - log.info("No clients to wait for - unblocking {0}.", self) - try: - self.channel.request("initialize", {"adapterID": "debugpy"}) - self.channel.request("attach", {"subProcessId": self.pid}) - self.channel.request("configurationDone") - self.channel.request("disconnect") - except Exception: - log.swallow_exception("Failed to unblock orphaned subprocess:") - self.channel.close() - - def __str__(self): - return "Server" + ("[?]" if self.pid is None else f"[pid={self.pid}]") - - def authenticate(self): - if access_token is None and adapter.access_token is None: - return - auth = self.channel.request( - "pydevdAuthorize", {"debugServerAccessToken": access_token} - ) - if auth["clientAccessToken"] != adapter.access_token: - self.channel.close() - raise RuntimeError('Mismatched "clientAccessToken"; server not authorized.') - - def request(self, request): - raise request.isnt_valid( - "Requests from the debug server to the client are not allowed." - ) - - def event(self, event): - pass - - def terminated_event(self, event): - self.channel.close() - - def disconnect(self): - with _lock: - self.disconnected = True - if self.server is not None: - # If the disconnect happened while Server was being instantiated, - # we need to tell it, so that it can clean up via Session.finalize(). - # It will also take care of deregistering the connection in that case. - self.server.disconnect() - elif self in _connections: - _connections.remove(self) - _connections_changed.set() - - def attach_to_session(self, session): - """Attaches this server to the specified Session as a Server component. - - Raises ValueError if the server already belongs to some session. 
- """ - - with _lock: - if self.server is not None: - raise ValueError - log.info("Attaching {0} to {1}", self, session) - self.server = Server(session, self) - - -class Server(components.Component): - """Handles the debug server side of a debug session.""" - - message_handler = components.Component.message_handler - - connection: Connection - - class Capabilities(components.Capabilities): - PROPERTIES = { - "supportsCompletionsRequest": False, - "supportsConditionalBreakpoints": False, - "supportsConfigurationDoneRequest": False, - "supportsDataBreakpoints": False, - "supportsDelayedStackTraceLoading": False, - "supportsDisassembleRequest": False, - "supportsEvaluateForHovers": False, - "supportsExceptionInfoRequest": False, - "supportsExceptionOptions": False, - "supportsFunctionBreakpoints": False, - "supportsGotoTargetsRequest": False, - "supportsHitConditionalBreakpoints": False, - "supportsLoadedSourcesRequest": False, - "supportsLogPoints": False, - "supportsModulesRequest": False, - "supportsReadMemoryRequest": False, - "supportsRestartFrame": False, - "supportsRestartRequest": False, - "supportsSetExpression": False, - "supportsSetVariable": False, - "supportsStepBack": False, - "supportsStepInTargetsRequest": False, - "supportsTerminateRequest": True, - "supportsTerminateThreadsRequest": False, - "supportsValueFormattingOptions": False, - "exceptionBreakpointFilters": [], - "additionalModuleColumns": [], - "supportedChecksumAlgorithms": [], - } - - def __init__(self, session, connection): - assert connection.server is None - with session: - assert not session.server - super().__init__(session, channel=connection.channel) - - self.connection = connection - - assert self.session.pid is None - if self.session.launcher and self.session.launcher.pid != self.pid: - log.info( - "Launcher reported PID={0}, but server reported PID={1}", - self.session.launcher.pid, - self.pid, - ) - self.session.pid = self.pid - - session.server = self - - @property - def pid(self): - """Process ID of the debuggee process, as reported by the server.""" - return self.connection.pid - - @property - def ppid(self): - """Parent process ID of the debuggee process, as reported by the server.""" - return self.connection.ppid - - def initialize(self, request): - assert request.is_request("initialize") - self.connection.authenticate() - request = self.channel.propagate(request) - request.wait_for_response() - self.capabilities = self.Capabilities(self, request.response) - - # Generic request handler, used if there's no specific handler below. - @message_handler - def request(self, request): - # Do not delegate requests from the server by default. There is a security - # boundary between the server and the adapter, and we cannot trust arbitrary - # requests sent over that boundary, since they may contain arbitrary code - # that the client will execute - e.g. "runInTerminal". The adapter must only - # propagate requests that it knows are safe. - raise request.isnt_valid( - "Requests from the debug server to the client are not allowed." - ) - - # Generic event handler, used if there's no specific handler below. - @message_handler - def event(self, event): - self.client.propagate_after_start(event) - - @message_handler - def initialized_event(self, event): - # pydevd doesn't send it, but the adapter will send its own in any case. - pass - - @message_handler - def process_event(self, event): - # If there is a launcher, it's handling the process event. 
- if not self.launcher: - self.client.propagate_after_start(event) - - @message_handler - def continued_event(self, event): - # https://github.com/microsoft/ptvsd/issues/1530 - # - # DAP specification says that a step request implies that only the thread on - # which that step occurred is resumed for the duration of the step. However, - # for VS compatibility, pydevd can operate in a mode that resumes all threads - # instead. This is set according to the value of "steppingResumesAllThreads" - # in "launch" or "attach" request, which defaults to true. If explicitly set - # to false, pydevd will only resume the thread that was stepping. - # - # To ensure that the client is aware that other threads are getting resumed in - # that mode, pydevd sends a "continued" event with "allThreadsResumed": true. - # when responding to a step request. This ensures correct behavior in VSCode - # and other DAP-conformant clients. - # - # On the other hand, VS does not follow the DAP specification in this regard. - # When it requests a step, it assumes that all threads will be resumed, and - # does not expect to see "continued" events explicitly reflecting that fact. - # If such events are sent regardless, VS behaves erratically. Thus, we have - # to suppress them specifically for VS. - if self.client.client_id not in ("visualstudio", "vsformac"): - self.client.propagate_after_start(event) - - @message_handler - def exited_event(self, event: messaging.Event): - if event("pydevdReason", str, optional=True) == "processReplaced": - # The parent process used some API like exec() that replaced it with another - # process in situ. The connection will shut down immediately afterwards, but - # we need to keep the corresponding session alive long enough to report the - # subprocess to it. - self.connection.process_replaced = True - else: - # If there is a launcher, it's handling the exit code. - if not self.launcher: - self.client.propagate_after_start(event) - - @message_handler - def terminated_event(self, event): - # Do not propagate this, since we'll report our own. - self.channel.close() - - def detach_from_session(self): - with _lock: - self.is_connected = False - self.channel.handlers = self.connection - self.channel.name = self.channel.stream.name = str(self.connection) - self.connection.server = None - - def disconnect(self): - if self.connection.process_replaced: - # Wait for the replacement server to connect to the adapter, and to report - # itself to the client for this session if there is one. 
- log.info("{0} is waiting for replacement subprocess.", self) - session = self.session - if not session.client or not session.client.is_connected: - wait_for_connection( - session, lambda conn: conn.pid == self.pid, timeout=30 - ) - else: - self.wait_for( - lambda: ( - not session.client - or not session.client.is_connected - or any( - conn.pid == self.pid - for conn in session.client.known_subprocesses - ) - ), - timeout=30, - ) - with _lock: - _connections.remove(self.connection) - _connections_changed.set() - super().disconnect() - - -def serve(host="127.0.0.1", port=0): - global listener - listener = sockets.serve("Server", Connection, host, port) - return listener.getsockname() - - -def is_serving(): - return listener is not None - - -def stop_serving(): - global listener - try: - if listener is not None: - listener.close() - listener = None - except Exception: - log.swallow_exception(level="warning") - - -def connections(): - with _lock: - return list(_connections) - - -def wait_for_connection(session, predicate, timeout=None): - """Waits until there is a server matching the specified predicate connected to - this adapter, and returns the corresponding Connection. - - If there is more than one server connection already available, returns the oldest - one. - """ - - def wait_for_timeout(): - time.sleep(timeout) - wait_for_timeout.timed_out = True - with _lock: - _connections_changed.set() - - wait_for_timeout.timed_out = timeout == 0 - if timeout: - thread = threading.Thread( - target=wait_for_timeout, name="servers.wait_for_connection() timeout" - ) - thread.daemon = True - thread.start() - - if timeout != 0: - log.info("{0} waiting for connection from debug server...", session) - while True: - with _lock: - _connections_changed.clear() - conns = (conn for conn in _connections if predicate(conn)) - conn = next(conns, None) - if conn is not None or wait_for_timeout.timed_out: - return conn - _connections_changed.wait() - - -def wait_until_disconnected(): - """Blocks until all debug servers disconnect from the adapter. - - If there are no server connections, waits until at least one is established first, - before waiting for it to disconnect. - """ - while True: - _connections_changed.wait() - with _lock: - _connections_changed.clear() - if not len(_connections): - return - - -def dont_wait_for_first_connection(): - """Unblocks any pending wait_until_disconnected() call that is waiting on the - first server to connect. - """ - with _lock: - _connections_changed.set() - - -def inject(pid, debugpy_args, on_output): - host, port = listener.getsockname() - - cmdline = [ - sys.executable, - os.path.dirname(debugpy.__file__), - "--connect", - host + ":" + str(port), - ] - if adapter.access_token is not None: - cmdline += ["--adapter-access-token", adapter.access_token] - cmdline += debugpy_args - cmdline += ["--pid", str(pid)] - - log.info("Spawning attach-to-PID debugger injector: {0!r}", cmdline) - try: - injector = subprocess.Popen( - cmdline, - bufsize=0, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - except Exception as exc: - log.swallow_exception( - "Failed to inject debug server into process with PID={0}", pid - ) - raise messaging.MessageHandlingError( - "Failed to inject debug server into process with PID={0}: {1}".format( - pid, exc - ) - ) - - # We need to capture the output of the injector - needed so that it doesn't - # get blocked on a write() syscall (besides showing it to the user if it - # is taking longer than expected). 
- - output_collected = [] - output_collected.append("--- Starting attach to pid: {0} ---\n".format(pid)) - - def capture(stream): - nonlocal output_collected - try: - while True: - line = stream.readline() - if not line: - break - line = line.decode("utf-8", "replace") - output_collected.append(line) - log.info("Injector[PID={0}] output: {1}", pid, line.rstrip()) - log.info("Injector[PID={0}] exited.", pid) - except Exception: - s = io.StringIO() - traceback.print_exc(file=s) - on_output("stderr", s.getvalue()) - - threading.Thread( - target=capture, - name=f"Injector[PID={pid}] stdout", - args=(injector.stdout,), - daemon=True, - ).start() - - def info_on_timeout(): - nonlocal output_collected - taking_longer_than_expected = False - initial_time = time.time() - while True: - time.sleep(1) - returncode = injector.poll() - if returncode is not None: - if returncode != 0: - # Something didn't work out. Let's print more info to the user. - on_output( - "stderr", - "Attach to PID failed.\n\n", - ) - - old = output_collected - output_collected = [] - contents = "".join(old) - on_output("stderr", "".join(contents)) - break - - elapsed = time.time() - initial_time - on_output( - "stdout", "Attaching to PID: %s (elapsed: %.2fs).\n" % (pid, elapsed) - ) - - if not taking_longer_than_expected: - if elapsed > 10: - taking_longer_than_expected = True - if sys.platform in ("linux", "linux2"): - on_output( - "stdout", - "\nThe attach to PID is taking longer than expected.\n", - ) - on_output( - "stdout", - "On Linux it's possible to customize the value of\n", - ) - on_output( - "stdout", - "`PYDEVD_GDB_SCAN_SHARED_LIBRARIES` so that fewer libraries.\n", - ) - on_output( - "stdout", - "are scanned when searching for the needed symbols.\n\n", - ) - on_output( - "stdout", - "i.e.: set in your environment variables (and restart your editor/client\n", - ) - on_output( - "stdout", - "so that it picks up the updated environment variable value):\n\n", - ) - on_output( - "stdout", - "PYDEVD_GDB_SCAN_SHARED_LIBRARIES=libdl, libltdl, libc, libfreebl3\n\n", - ) - on_output( - "stdout", - "-- the actual library may be different (the gdb output typically\n", - ) - on_output( - "stdout", - "-- writes the libraries that will be used, so, it should be possible\n", - ) - on_output( - "stdout", - "-- to test other libraries if the above doesn't work).\n\n", - ) - if taking_longer_than_expected: - # If taking longer than expected, start showing the actual output to the user. 
- old = output_collected - output_collected = [] - contents = "".join(old) - if contents: - on_output("stderr", contents) - - threading.Thread( - target=info_on_timeout, name=f"Injector[PID={pid}] info on timeout", daemon=True - ).start() diff --git a/spaces/Superlang/ImageProcessor/annotator/mlsd/models/mbv2_mlsd_tiny.py b/spaces/Superlang/ImageProcessor/annotator/mlsd/models/mbv2_mlsd_tiny.py deleted file mode 100644 index e3ed633f2cc23ea1829a627fdb879ab39f641f83..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/mlsd/models/mbv2_mlsd_tiny.py +++ /dev/null @@ -1,275 +0,0 @@ -import os -import sys -import torch -import torch.nn as nn -import torch.utils.model_zoo as model_zoo -from torch.nn import functional as F - - -class BlockTypeA(nn.Module): - def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True): - super(BlockTypeA, self).__init__() - self.conv1 = nn.Sequential( - nn.Conv2d(in_c2, out_c2, kernel_size=1), - nn.BatchNorm2d(out_c2), - nn.ReLU(inplace=True) - ) - self.conv2 = nn.Sequential( - nn.Conv2d(in_c1, out_c1, kernel_size=1), - nn.BatchNorm2d(out_c1), - nn.ReLU(inplace=True) - ) - self.upscale = upscale - - def forward(self, a, b): - b = self.conv1(b) - a = self.conv2(a) - b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True) - return torch.cat((a, b), dim=1) - - -class BlockTypeB(nn.Module): - def __init__(self, in_c, out_c): - super(BlockTypeB, self).__init__() - self.conv1 = nn.Sequential( - nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), - nn.BatchNorm2d(in_c), - nn.ReLU() - ) - self.conv2 = nn.Sequential( - nn.Conv2d(in_c, out_c, kernel_size=3, padding=1), - nn.BatchNorm2d(out_c), - nn.ReLU() - ) - - def forward(self, x): - x = self.conv1(x) + x - x = self.conv2(x) - return x - -class BlockTypeC(nn.Module): - def __init__(self, in_c, out_c): - super(BlockTypeC, self).__init__() - self.conv1 = nn.Sequential( - nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5), - nn.BatchNorm2d(in_c), - nn.ReLU() - ) - self.conv2 = nn.Sequential( - nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), - nn.BatchNorm2d(in_c), - nn.ReLU() - ) - self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1) - - def forward(self, x): - x = self.conv1(x) - x = self.conv2(x) - x = self.conv3(x) - return x - -def _make_divisible(v, divisor, min_value=None): - """ - This function is taken from the original tf repo. - It ensures that all layers have a channel number that is divisible by 8 - It can be seen here: - https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py - :param v: - :param divisor: - :param min_value: - :return: - """ - if min_value is None: - min_value = divisor - new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than 10%. 
- if new_v < 0.9 * v: - new_v += divisor - return new_v - - -class ConvBNReLU(nn.Sequential): - def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): - self.channel_pad = out_planes - in_planes - self.stride = stride - #padding = (kernel_size - 1) // 2 - - # TFLite uses slightly different padding than PyTorch - if stride == 2: - padding = 0 - else: - padding = (kernel_size - 1) // 2 - - super(ConvBNReLU, self).__init__( - nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), - nn.BatchNorm2d(out_planes), - nn.ReLU6(inplace=True) - ) - self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride) - - - def forward(self, x): - # TFLite uses different padding - if self.stride == 2: - x = F.pad(x, (0, 1, 0, 1), "constant", 0) - #print(x.shape) - - for module in self: - if not isinstance(module, nn.MaxPool2d): - x = module(x) - return x - - -class InvertedResidual(nn.Module): - def __init__(self, inp, oup, stride, expand_ratio): - super(InvertedResidual, self).__init__() - self.stride = stride - assert stride in [1, 2] - - hidden_dim = int(round(inp * expand_ratio)) - self.use_res_connect = self.stride == 1 and inp == oup - - layers = [] - if expand_ratio != 1: - # pw - layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) - layers.extend([ - # dw - ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), - # pw-linear - nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), - nn.BatchNorm2d(oup), - ]) - self.conv = nn.Sequential(*layers) - - def forward(self, x): - if self.use_res_connect: - return x + self.conv(x) - else: - return self.conv(x) - - -class MobileNetV2(nn.Module): - def __init__(self, pretrained=True): - """ - MobileNet V2 main class - Args: - num_classes (int): Number of classes - width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount - inverted_residual_setting: Network structure - round_nearest (int): Round the number of channels in each layer to be a multiple of this number - Set to 1 to turn off rounding - block: Module specifying inverted residual building block for mobilenet - """ - super(MobileNetV2, self).__init__() - - block = InvertedResidual - input_channel = 32 - last_channel = 1280 - width_mult = 1.0 - round_nearest = 8 - - inverted_residual_setting = [ - # t, c, n, s - [1, 16, 1, 1], - [6, 24, 2, 2], - [6, 32, 3, 2], - [6, 64, 4, 2], - #[6, 96, 3, 1], - #[6, 160, 3, 2], - #[6, 320, 1, 1], - ] - - # only check the first element, assuming user knows t,c,n,s are required - if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: - raise ValueError("inverted_residual_setting should be non-empty " - "or a 4-element list, got {}".format(inverted_residual_setting)) - - # building first layer - input_channel = _make_divisible(input_channel * width_mult, round_nearest) - self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) - features = [ConvBNReLU(4, input_channel, stride=2)] - # building inverted residual blocks - for t, c, n, s in inverted_residual_setting: - output_channel = _make_divisible(c * width_mult, round_nearest) - for i in range(n): - stride = s if i == 0 else 1 - features.append(block(input_channel, output_channel, stride, expand_ratio=t)) - input_channel = output_channel - self.features = nn.Sequential(*features) - - self.fpn_selected = [3, 6, 10] - # weight initialization - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out') - if 
m.bias is not None: - nn.init.zeros_(m.bias) - elif isinstance(m, nn.BatchNorm2d): - nn.init.ones_(m.weight) - nn.init.zeros_(m.bias) - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - nn.init.zeros_(m.bias) - - #if pretrained: - # self._load_pretrained_model() - - def _forward_impl(self, x): - # This exists since TorchScript doesn't support inheritance, so the superclass method - # (this one) needs to have a name other than `forward` that can be accessed in a subclass - fpn_features = [] - for i, f in enumerate(self.features): - if i > self.fpn_selected[-1]: - break - x = f(x) - if i in self.fpn_selected: - fpn_features.append(x) - - c2, c3, c4 = fpn_features - return c2, c3, c4 - - - def forward(self, x): - return self._forward_impl(x) - - def _load_pretrained_model(self): - pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth') - model_dict = {} - state_dict = self.state_dict() - for k, v in pretrain_dict.items(): - if k in state_dict: - model_dict[k] = v - state_dict.update(model_dict) - self.load_state_dict(state_dict) - - -class MobileV2_MLSD_Tiny(nn.Module): - def __init__(self): - super(MobileV2_MLSD_Tiny, self).__init__() - - self.backbone = MobileNetV2(pretrained=True) - - self.block12 = BlockTypeA(in_c1= 32, in_c2= 64, - out_c1= 64, out_c2=64) - self.block13 = BlockTypeB(128, 64) - - self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64, - out_c1= 32, out_c2= 32) - self.block15 = BlockTypeB(64, 64) - - self.block16 = BlockTypeC(64, 16) - - def forward(self, x): - c2, c3, c4 = self.backbone(x) - - x = self.block12(c3, c4) - x = self.block13(x) - x = self.block14(c2, x) - x = self.block15(x) - x = self.block16(x) - x = x[:, 7:, :, :] - #print(x.shape) - x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True) - - return x \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/version.py b/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/version.py deleted file mode 100644 index a6221b3de7b1490c5e712e8b5fcc94c3d9d04295..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '1.0.2' diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/layers/losses.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/layers/losses.py deleted file mode 100644 index 850a852a2f0986d4d1ce89a526d96db42c76e44f..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/layers/losses.py +++ /dev/null @@ -1,133 +0,0 @@ -import math -import torch - - -def diou_loss( - boxes1: torch.Tensor, - boxes2: torch.Tensor, - reduction: str = "none", - eps: float = 1e-7, -) -> torch.Tensor: - """ - Distance Intersection over Union Loss (Zhaohui Zheng et. al) - https://arxiv.org/abs/1911.08287 - Args: - boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). - reduction: 'none' | 'mean' | 'sum' - 'none': No reduction will be applied to the output. - 'mean': The output will be averaged. - 'sum': The output will be summed. 
- eps (float): small number to prevent division by zero - """ - - x1, y1, x2, y2 = boxes1.unbind(dim=-1) - x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) - - # TODO: use torch._assert_async() when pytorch 1.8 support is dropped - assert (x2 >= x1).all(), "bad box: x1 larger than x2" - assert (y2 >= y1).all(), "bad box: y1 larger than y2" - - # Intersection keypoints - xkis1 = torch.max(x1, x1g) - ykis1 = torch.max(y1, y1g) - xkis2 = torch.min(x2, x2g) - ykis2 = torch.min(y2, y2g) - - intsct = torch.zeros_like(x1) - mask = (ykis2 > ykis1) & (xkis2 > xkis1) - intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) - union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps - iou = intsct / union - - # smallest enclosing box - xc1 = torch.min(x1, x1g) - yc1 = torch.min(y1, y1g) - xc2 = torch.max(x2, x2g) - yc2 = torch.max(y2, y2g) - diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps - - # centers of boxes - x_p = (x2 + x1) / 2 - y_p = (y2 + y1) / 2 - x_g = (x1g + x2g) / 2 - y_g = (y1g + y2g) / 2 - distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) - - # Eqn. (7) - loss = 1 - iou + (distance / diag_len) - if reduction == "mean": - loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() - elif reduction == "sum": - loss = loss.sum() - - return loss - - -def ciou_loss( - boxes1: torch.Tensor, - boxes2: torch.Tensor, - reduction: str = "none", - eps: float = 1e-7, -) -> torch.Tensor: - """ - Complete Intersection over Union Loss (Zhaohui Zheng et. al) - https://arxiv.org/abs/1911.08287 - Args: - boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). - reduction: 'none' | 'mean' | 'sum' - 'none': No reduction will be applied to the output. - 'mean': The output will be averaged. - 'sum': The output will be summed. - eps (float): small number to prevent division by zero - """ - - x1, y1, x2, y2 = boxes1.unbind(dim=-1) - x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) - - # TODO: use torch._assert_async() when pytorch 1.8 support is dropped - assert (x2 >= x1).all(), "bad box: x1 larger than x2" - assert (y2 >= y1).all(), "bad box: y1 larger than y2" - - # Intersection keypoints - xkis1 = torch.max(x1, x1g) - ykis1 = torch.max(y1, y1g) - xkis2 = torch.min(x2, x2g) - ykis2 = torch.min(y2, y2g) - - intsct = torch.zeros_like(x1) - mask = (ykis2 > ykis1) & (xkis2 > xkis1) - intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) - union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps - iou = intsct / union - - # smallest enclosing box - xc1 = torch.min(x1, x1g) - yc1 = torch.min(y1, y1g) - xc2 = torch.max(x2, x2g) - yc2 = torch.max(y2, y2g) - diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps - - # centers of boxes - x_p = (x2 + x1) / 2 - y_p = (y2 + y1) / 2 - x_g = (x1g + x2g) / 2 - y_g = (y1g + y2g) / 2 - distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) - - # width and height of boxes - w_pred = x2 - x1 - h_pred = y2 - y1 - w_gt = x2g - x1g - h_gt = y2g - y1g - v = (4 / (math.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) - with torch.no_grad(): - alpha = v / (1 - iou + v + eps) - - # Eqn. 
(10) - loss = 1 - iou + (distance / diag_len) + alpha * v - if reduction == "mean": - loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() - elif reduction == "sum": - loss = loss.sum() - - return loss diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fast_scnn.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fast_scnn.py deleted file mode 100644 index 32fdeb659355a5ce5ef2cc7c2f30742703811cdf..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/fast_scnn.py +++ /dev/null @@ -1,57 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) -model = dict( - type='EncoderDecoder', - backbone=dict( - type='FastSCNN', - downsample_dw_channels=(32, 48), - global_in_channels=64, - global_block_channels=(64, 96, 128), - global_block_strides=(2, 2, 1), - global_out_channels=128, - higher_in_channels=64, - lower_in_channels=128, - fusion_out_channels=128, - out_indices=(0, 1, 2), - norm_cfg=norm_cfg, - align_corners=False), - decode_head=dict( - type='DepthwiseSeparableFCNHead', - in_channels=128, - channels=128, - concat_input=False, - num_classes=19, - in_index=-1, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), - auxiliary_head=[ - dict( - type='FCNHead', - in_channels=128, - channels=32, - num_convs=1, - num_classes=19, - in_index=-2, - norm_cfg=norm_cfg, - concat_input=False, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), - dict( - type='FCNHead', - in_channels=64, - channels=32, - num_convs=1, - num_classes=19, - in_index=-3, - norm_cfg=norm_cfg, - concat_input=False, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), - ], - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/uper_head.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/uper_head.py deleted file mode 100644 index 9e1301b706b0d83ed714bbdee8ee24693f150455..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/uper_head.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead -from .psp_head import PPM - - -@HEADS.register_module() -class UPerHead(BaseDecodeHead): - """Unified Perceptual Parsing for Scene Understanding. - - This head is the implementation of `UPerNet - `_. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module applied on the last feature. Default: (1, 2, 3, 6). 
- """ - - def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(UPerHead, self).__init__( - input_transform='multiple_select', **kwargs) - # PSP Module - self.psp_modules = PPM( - pool_scales, - self.in_channels[-1], - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=self.align_corners) - self.bottleneck = ConvModule( - self.in_channels[-1] + len(pool_scales) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - # FPN Module - self.lateral_convs = nn.ModuleList() - self.fpn_convs = nn.ModuleList() - for in_channels in self.in_channels[:-1]: # skip the top layer - l_conv = ConvModule( - in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - inplace=False) - fpn_conv = ConvModule( - self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - inplace=False) - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - self.fpn_bottleneck = ConvModule( - len(self.in_channels) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def psp_forward(self, inputs): - """Forward function of PSP module.""" - x = inputs[-1] - psp_outs = [x] - psp_outs.extend(self.psp_modules(x)) - psp_outs = torch.cat(psp_outs, dim=1) - output = self.bottleneck(psp_outs) - - return output - - def forward(self, inputs): - """Forward function.""" - - inputs = self._transform_inputs(inputs) - - # build laterals - laterals = [ - lateral_conv(inputs[i]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - laterals.append(self.psp_forward(inputs)) - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] += resize( - laterals[i], - size=prev_shape, - mode='bilinear', - align_corners=self.align_corners) - - # build outputs - fpn_outs = [ - self.fpn_convs[i](laterals[i]) - for i in range(used_backbone_levels - 1) - ] - # append psp feature - fpn_outs.append(laterals[-1]) - - for i in range(used_backbone_levels - 1, 0, -1): - fpn_outs[i] = resize( - fpn_outs[i], - size=fpn_outs[0].shape[2:], - mode='bilinear', - align_corners=self.align_corners) - fpn_outs = torch.cat(fpn_outs, dim=1) - output = self.fpn_bottleneck(fpn_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/segmentors/encoder_decoder.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/segmentors/encoder_decoder.py deleted file mode 100644 index 98392ac04c4c44a7f4e7b1c0808266875877dd1f..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/segmentors/encoder_decoder.py +++ /dev/null @@ -1,298 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from annotator.uniformer.mmseg.core import add_prefix -from annotator.uniformer.mmseg.ops import resize -from .. import builder -from ..builder import SEGMENTORS -from .base import BaseSegmentor - - -@SEGMENTORS.register_module() -class EncoderDecoder(BaseSegmentor): - """Encoder Decoder segmentors. - - EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. 
- Note that auxiliary_head is only used for deep supervision during training, - which could be dumped during inference. - """ - - def __init__(self, - backbone, - decode_head, - neck=None, - auxiliary_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(EncoderDecoder, self).__init__() - self.backbone = builder.build_backbone(backbone) - if neck is not None: - self.neck = builder.build_neck(neck) - self._init_decode_head(decode_head) - self._init_auxiliary_head(auxiliary_head) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - self.init_weights(pretrained=pretrained) - - assert self.with_decode_head - - def _init_decode_head(self, decode_head): - """Initialize ``decode_head``""" - self.decode_head = builder.build_head(decode_head) - self.align_corners = self.decode_head.align_corners - self.num_classes = self.decode_head.num_classes - - def _init_auxiliary_head(self, auxiliary_head): - """Initialize ``auxiliary_head``""" - if auxiliary_head is not None: - if isinstance(auxiliary_head, list): - self.auxiliary_head = nn.ModuleList() - for head_cfg in auxiliary_head: - self.auxiliary_head.append(builder.build_head(head_cfg)) - else: - self.auxiliary_head = builder.build_head(auxiliary_head) - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone and heads. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - - super(EncoderDecoder, self).init_weights(pretrained) - self.backbone.init_weights(pretrained=pretrained) - self.decode_head.init_weights() - if self.with_auxiliary_head: - if isinstance(self.auxiliary_head, nn.ModuleList): - for aux_head in self.auxiliary_head: - aux_head.init_weights() - else: - self.auxiliary_head.init_weights() - - def extract_feat(self, img): - """Extract features from images.""" - x = self.backbone(img) - if self.with_neck: - x = self.neck(x) - return x - - def encode_decode(self, img, img_metas): - """Encode images with backbone and decode into a semantic segmentation - map of the same size as input.""" - x = self.extract_feat(img) - out = self._decode_head_forward_test(x, img_metas) - out = resize( - input=out, - size=img.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - return out - - def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): - """Run forward function and calculate loss for decode head in - training.""" - losses = dict() - loss_decode = self.decode_head.forward_train(x, img_metas, - gt_semantic_seg, - self.train_cfg) - - losses.update(add_prefix(loss_decode, 'decode')) - return losses - - def _decode_head_forward_test(self, x, img_metas): - """Run forward function and calculate loss for decode head in - inference.""" - seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg) - return seg_logits - - def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg): - """Run forward function and calculate loss for auxiliary head in - training.""" - losses = dict() - if isinstance(self.auxiliary_head, nn.ModuleList): - for idx, aux_head in enumerate(self.auxiliary_head): - loss_aux = aux_head.forward_train(x, img_metas, - gt_semantic_seg, - self.train_cfg) - losses.update(add_prefix(loss_aux, f'aux_{idx}')) - else: - loss_aux = self.auxiliary_head.forward_train( - x, img_metas, gt_semantic_seg, self.train_cfg) - losses.update(add_prefix(loss_aux, 'aux')) - - return losses - - def forward_dummy(self, img): - """Dummy forward function.""" - seg_logit = self.encode_decode(img, None) - - return 
seg_logit - - def forward_train(self, img, img_metas, gt_semantic_seg): - """Forward function for training. - - Args: - img (Tensor): Input images. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - - x = self.extract_feat(img) - - losses = dict() - - loss_decode = self._decode_head_forward_train(x, img_metas, - gt_semantic_seg) - losses.update(loss_decode) - - if self.with_auxiliary_head: - loss_aux = self._auxiliary_head_forward_train( - x, img_metas, gt_semantic_seg) - losses.update(loss_aux) - - return losses - - # TODO refactor - def slide_inference(self, img, img_meta, rescale): - """Inference by sliding-window with overlap. - - If h_crop > h_img or w_crop > w_img, the small patch will be used to - decode without padding. - """ - - h_stride, w_stride = self.test_cfg.stride - h_crop, w_crop = self.test_cfg.crop_size - batch_size, _, h_img, w_img = img.size() - num_classes = self.num_classes - h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 - w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 - preds = img.new_zeros((batch_size, num_classes, h_img, w_img)) - count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) - for h_idx in range(h_grids): - for w_idx in range(w_grids): - y1 = h_idx * h_stride - x1 = w_idx * w_stride - y2 = min(y1 + h_crop, h_img) - x2 = min(x1 + w_crop, w_img) - y1 = max(y2 - h_crop, 0) - x1 = max(x2 - w_crop, 0) - crop_img = img[:, :, y1:y2, x1:x2] - crop_seg_logit = self.encode_decode(crop_img, img_meta) - preds += F.pad(crop_seg_logit, - (int(x1), int(preds.shape[3] - x2), int(y1), - int(preds.shape[2] - y2))) - - count_mat[:, :, y1:y2, x1:x2] += 1 - assert (count_mat == 0).sum() == 0 - if torch.onnx.is_in_onnx_export(): - # cast count_mat to constant while exporting to ONNX - count_mat = torch.from_numpy( - count_mat.cpu().detach().numpy()).to(device=img.device) - preds = preds / count_mat - if rescale: - preds = resize( - preds, - size=img_meta[0]['ori_shape'][:2], - mode='bilinear', - align_corners=self.align_corners, - warning=False) - return preds - - def whole_inference(self, img, img_meta, rescale): - """Inference with full image.""" - - seg_logit = self.encode_decode(img, img_meta) - if rescale: - # support dynamic shape for onnx - if torch.onnx.is_in_onnx_export(): - size = img.shape[2:] - else: - size = img_meta[0]['ori_shape'][:2] - seg_logit = resize( - seg_logit, - size=size, - mode='bilinear', - align_corners=self.align_corners, - warning=False) - - return seg_logit - - def inference(self, img, img_meta, rescale): - """Inference with slide/whole style. - - Args: - img (Tensor): The input image of shape (N, 3, H, W). - img_meta (dict): Image info dict where each dict has: 'img_shape', - 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - rescale (bool): Whether rescale back to original shape. - - Returns: - Tensor: The output segmentation map. 
- """ - - assert self.test_cfg.mode in ['slide', 'whole'] - ori_shape = img_meta[0]['ori_shape'] - assert all(_['ori_shape'] == ori_shape for _ in img_meta) - if self.test_cfg.mode == 'slide': - seg_logit = self.slide_inference(img, img_meta, rescale) - else: - seg_logit = self.whole_inference(img, img_meta, rescale) - output = F.softmax(seg_logit, dim=1) - flip = img_meta[0]['flip'] - if flip: - flip_direction = img_meta[0]['flip_direction'] - assert flip_direction in ['horizontal', 'vertical'] - if flip_direction == 'horizontal': - output = output.flip(dims=(3, )) - elif flip_direction == 'vertical': - output = output.flip(dims=(2, )) - - return output - - def simple_test(self, img, img_meta, rescale=True): - """Simple test with single image.""" - seg_logit = self.inference(img, img_meta, rescale) - seg_pred = seg_logit.argmax(dim=1) - if torch.onnx.is_in_onnx_export(): - # our inference backend only support 4D output - seg_pred = seg_pred.unsqueeze(0) - return seg_pred - seg_pred = seg_pred.cpu().numpy() - # unravel batch dim - seg_pred = list(seg_pred) - return seg_pred - - def aug_test(self, imgs, img_metas, rescale=True): - """Test with augmentations. - - Only rescale=True is supported. - """ - # aug_test rescale all imgs back to ori_shape for now - assert rescale - # to save memory, we get augmented seg logit inplace - seg_logit = self.inference(imgs[0], img_metas[0], rescale) - for i in range(1, len(imgs)): - cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale) - seg_logit += cur_seg_logit - seg_logit /= len(imgs) - seg_pred = seg_logit.argmax(dim=1) - seg_pred = seg_pred.cpu().numpy() - # unravel batch dim - seg_pred = list(seg_pred) - return seg_pred diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/monkey.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/monkey.py deleted file mode 100644 index 50653fc7ee41cd529c8413bd9b797ca801eb2dfa..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/monkey.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Monkey patching of distutils. -""" - -import sys -import distutils.filelist -import platform -import types -import functools -from importlib import import_module -import inspect - -import setuptools - -__all__ = [] -""" -Everything is private. Contact the project team -if you think you need this functionality. -""" - - -def _get_mro(cls): - """ - Returns the bases classes for cls sorted by the MRO. - - Works around an issue on Jython where inspect.getmro will not return all - base classes if multiple classes share the same name. Instead, this - function will return a tuple containing the class itself, and the contents - of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. - """ - if platform.python_implementation() == "Jython": - return (cls,) + cls.__bases__ - return inspect.getmro(cls) - - -def get_unpatched(item): - lookup = ( - get_unpatched_class if isinstance(item, type) else - get_unpatched_function if isinstance(item, types.FunctionType) else - lambda item: None - ) - return lookup(item) - - -def get_unpatched_class(cls): - """Protect against re-patching the distutils if reloaded - - Also ensures that no other distutils extension monkeypatched the distutils - first. 
- """ - external_bases = ( - cls - for cls in _get_mro(cls) - if not cls.__module__.startswith('setuptools') - ) - base = next(external_bases) - if not base.__module__.startswith('distutils'): - msg = "distutils has already been patched by %r" % cls - raise AssertionError(msg) - return base - - -def patch_all(): - # we can't patch distutils.cmd, alas - distutils.core.Command = setuptools.Command - - has_issue_12885 = sys.version_info <= (3, 5, 3) - - if has_issue_12885: - # fix findall bug in distutils (http://bugs.python.org/issue12885) - distutils.filelist.findall = setuptools.findall - - needs_warehouse = ( - (3, 4) < sys.version_info < (3, 4, 6) - or - (3, 5) < sys.version_info <= (3, 5, 3) - ) - - if needs_warehouse: - warehouse = 'https://upload.pypi.org/legacy/' - distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse - - _patch_distribution_metadata() - - # Install Distribution throughout the distutils - for module in distutils.dist, distutils.core, distutils.cmd: - module.Distribution = setuptools.dist.Distribution - - # Install the patched Extension - distutils.core.Extension = setuptools.extension.Extension - distutils.extension.Extension = setuptools.extension.Extension - if 'distutils.command.build_ext' in sys.modules: - sys.modules['distutils.command.build_ext'].Extension = ( - setuptools.extension.Extension - ) - - patch_for_msvc_specialized_compiler() - - -def _patch_distribution_metadata(): - """Patch write_pkg_file and read_pkg_file for higher metadata standards""" - for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'): - new_val = getattr(setuptools.dist, attr) - setattr(distutils.dist.DistributionMetadata, attr, new_val) - - -def patch_func(replacement, target_mod, func_name): - """ - Patch func_name in target_mod with replacement - - Important - original must be resolved by name to avoid - patching an already patched function. - """ - original = getattr(target_mod, func_name) - - # set the 'unpatched' attribute on the replacement to - # point to the original. - vars(replacement).setdefault('unpatched', original) - - # replace the function in the original module - setattr(target_mod, func_name, replacement) - - -def get_unpatched_function(candidate): - return getattr(candidate, 'unpatched') - - -def patch_for_msvc_specialized_compiler(): - """ - Patch functions in distutils to use standalone Microsoft Visual C++ - compilers. - """ - # import late to avoid circular imports on Python < 3.5 - msvc = import_module('setuptools.msvc') - - if platform.system() != 'Windows': - # Compilers only available on Microsoft Windows - return - - def patch_params(mod_name, func_name): - """ - Prepare the parameters for patch_func to patch indicated function. 
- """ - repl_prefix = 'msvc14_' - repl_name = repl_prefix + func_name.lstrip('_') - repl = getattr(msvc, repl_name) - mod = import_module(mod_name) - if not hasattr(mod, func_name): - raise ImportError(func_name) - return repl, mod, func_name - - # Python 3.5+ - msvc14 = functools.partial(patch_params, 'distutils._msvccompiler') - - try: - # Patch distutils._msvccompiler._get_vc_env - patch_func(*msvc14('_get_vc_env')) - except ImportError: - pass diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py deleted file mode 100644 index 4aa319fc7e614f6a7a8ece7a45c177211c03012d..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -import unittest -import torch - -from detectron2.layers import DeformConv, ModulatedDeformConv -from detectron2.utils.env import TORCH_VERSION - - -@unittest.skipIf( - TORCH_VERSION == (1, 8) and torch.cuda.is_available(), - "This test fails under cuda11 + torch1.8.", -) -class DeformableTest(unittest.TestCase): - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_forward_output(self): - device = torch.device("cuda") - N, C, H, W = shape = 1, 1, 5, 5 - kernel_size = 3 - padding = 1 - - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) - """ - 0 1 2 3 4 - 5 6 7 8 9 - 10 11 12 13 14 - 15 16 17 18 19 - 20 21 22 23 24 - """ - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) - - # Test DCN v1 - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) - output = deform(inputs, offset) - output = output.detach().cpu().numpy() - deform_results = np.array( - [ - [30, 41.25, 48.75, 45, 28.75], - [62.25, 81, 90, 80.25, 50.25], - [99.75, 126, 135, 117.75, 72.75], - [105, 131.25, 138.75, 120, 73.75], - [71.75, 89.25, 93.75, 80.75, 49.5], - ] - ) - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) - - # Test DCN v2 - mask_channels = kernel_size * kernel_size - mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( - device - ) - modulate_deform.weight = deform.weight - output = modulate_deform(inputs, offset, mask) - output = output.detach().cpu().numpy() - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5)) - - def test_forward_output_on_cpu(self): - device = torch.device("cpu") - N, C, H, W = shape = 1, 1, 5, 5 - kernel_size = 3 - padding = 1 - - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) - - # Test DCN v1 on cpu - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) - output = deform(inputs, offset) - output = output.detach().cpu().numpy() - deform_results = np.array( - [ - [30, 41.25, 48.75, 45, 28.75], - [62.25, 81, 90, 80.25, 50.25], - [99.75, 126, 135, 
117.75, 72.75], - [105, 131.25, 138.75, 120, 73.75], - [71.75, 89.25, 93.75, 80.75, 49.5], - ] - ) - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) - - @unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu access") - def test_forward_output_on_cpu_equals_output_on_gpu(self): - N, C, H, W = shape = 2, 4, 10, 10 - kernel_size = 3 - padding = 1 - - for groups in [1, 2]: - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape) - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32) - - deform_gpu = DeformConv( - C, C, kernel_size=kernel_size, padding=padding, groups=groups - ).to("cuda") - deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight)) - output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy() - - deform_cpu = DeformConv( - C, C, kernel_size=kernel_size, padding=padding, groups=groups - ).to("cpu") - deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight)) - output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy() - - self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten())) - - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_small_input(self): - device = torch.device("cuda") - for kernel_size in [3, 5]: - padding = kernel_size // 2 - N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1) - - inputs = torch.rand(shape).to(device) # input size is smaller than kernel size - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - output = deform(inputs, offset) - self.assertTrue(output.shape == inputs.shape) - - mask_channels = kernel_size * kernel_size - mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv( - C, C, kernel_size, padding=padding, bias=False - ).to(device) - output = modulate_deform(inputs, offset, mask) - self.assertTrue(output.shape == inputs.shape) - - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_raise_exception(self): - device = torch.device("cuda") - N, C, H, W = shape = 1, 1, 3, 3 - kernel_size = 3 - padding = 1 - - inputs = torch.rand(shape, dtype=torch.float32).to(device) - offset_channels = kernel_size * kernel_size # This is wrong channels for offset - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - self.assertRaises(RuntimeError, deform, inputs, offset) - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask - mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( - device - ) - self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask) - - def test_repr(self): - module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) - correct_string = ( - "DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " - "stride=(1, 1), padding=(1, 1), dilation=(1, 1), " - "groups=1, deformable_groups=2, 
bias=False)" - ) - self.assertEqual(repr(module), correct_string) - - module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) - correct_string = ( - "ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " - "stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)" - ) - self.assertEqual(repr(module), correct_string) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/structures/__init__.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/structures/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ThirdEyeData/Customer-Conversion-Prediction/app.py b/spaces/ThirdEyeData/Customer-Conversion-Prediction/app.py deleted file mode 100644 index 8f36378911fb6162a33e935655f498479076ee76..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Customer-Conversion-Prediction/app.py +++ /dev/null @@ -1,179 +0,0 @@ -import os -import sys -from random import randint -import time -import uuid -import argparse -sys.path.append(os.path.abspath("../supv")) -from matumizi.util import * -from mcclf import * -import streamlit as st - - -def genVisitHistory(numUsers, convRate, label): - for i in range(numUsers): - userID = genID(12) - userSess = [] - userSess.append(userID) - - conv = randint(0, 100) - if (conv < convRate): - #converted - if (label): - if (randint(0,100) < 90): - userSess.append("T") - else: - userSess.append("F") - - - numSession = randint(2, 20) - for j in range(numSession): - sess = randint(0, 100) - if (sess <= 15): - elapsed = "H" - elif (sess > 15 and sess <= 40): - elapsed = "M" - else: - elapsed = "L" - - sess = randint(0, 100) - if (sess <= 15): - duration = "L" - elif (sess > 15 and sess <= 40): - duration = "M" - else: - duration = "H" - - sessSummary = elapsed + duration - userSess.append(sessSummary) - - - else: - #not converted - if (label): - if (randint(0,100) < 90): - userSess.append("F") - else: - userSess.append("T") - - numSession = randint(2, 12) - for j in range(numSession): - sess = randint(0, 100) - if (sess <= 20): - elapsed = "L" - elif (sess > 20 and sess <= 45): - elapsed = "M" - else: - elapsed = "H" - - sess = randint(0, 100) - if (sess <= 20): - duration = "H" - elif (sess > 20 and sess <= 45): - duration = "M" - else: - duration = "L" - - sessSummary = elapsed + duration - userSess.append(sessSummary) - - st.write(",".join(userSess)) - - -def main(): - st.set_page_config(page_title="Customer Conversion Prediction", page_icon=":guardsman:", layout="wide") - st.title("Customer Conversion Prediction") - - # # Add sidebar - # st.sidebar.title("Navigation") - # app_mode = st.sidebar.selectbox("Choose the app mode", - # ["Instructions", "Generate User Visit History", "Train Model", "Predict Conversion"]) - - # Add sidebar - st.sidebar.title("Navigation") - app_mode = st.sidebar.selectbox("Choose the App Mode", - ["Instructions", "Generate User Visit History", "Predict Conversion"]) - - if app_mode == "Instructions": - st.write("Welcome to the Markov Chain Classifier app!") - # st.write("This app allows you to generate user visit history, train a Markov Chain Classifier model, and predict conversion.") - st.write("This app allows you to generate user visit history and predict conversion of the visitor into customer.") - st.write("To get started, use the sidebar to navigate to the desired functionality.") 
- st.write("1. **Generate User Visit History**: Select the number of users and conversion rate, and click the 'Generate' button to generate user visit history.") - # st.write("2. **Train Model**: Upload an ML config file using the file uploader, and click the 'Train' button to train the Markov Chain Classifier model.") - st.write("2. **Predict Conversion**: Enter the User ID for which you want to predict, and click the 'Predict' button to make predictions whether the visitor will likely to convert into customer or not.") - - # Description of MarkovChainClassifier - mcclf_description = "The MarkovChainClassifier is a Machine Learning Classifier that utilizes the concept of Markov chains for prediction. Markov chains are mathematical models that represent a system where the future state of the system depends only on its current state, and not on the previous states. The MarkovChainClassifier uses this concept to make predictions by modeling the transition probabilities between different states or categories in the input data. It captures the probabilistic relationships between variables and uses them to classify new data points into one or more predefined categories. The MarkovChainClassifier can be useful in scenarios where the data has a sequential or time-dependent structure, and the relationships between variables can be modeled as Markov chains. It can be applied to various tasks, such as text classification, speech recognition, recommendation systems, and financial forecasting." - # Display the description in Streamlit app - st.header("Model description:") - st.write(mcclf_description) - - elif app_mode == "Generate User Visit History": - st.subheader("Generate User Visit History") - num_users = st.number_input("Number of users", min_value=1, max_value=10000, value=100, step=1) - conv_rate = st.slider("Conversion rate", min_value=0, max_value=100, value=10, step=1) - add_label = st.checkbox("Add label", value=False) - if st.button("Generate"): - genVisitHistory(num_users, conv_rate, add_label) - - # elif app_mode == "Train Model": - # st.subheader("Train Model") - # mlf_path = st.file_uploader("Upload ML config file") - # if st.button("Train"): - # if mlf_path is not None: - # model = MarkovChainClassifier(mlf_path) - # model.train() - - elif app_mode == "Predict Conversion": - st.subheader("Predict Conversion") - - # Create an instance of MarkovChainClassifier with the ML config file - model = MarkovChainClassifier("mcclf_cc.properties") - - # Get user input for userID - user_id = st.text_input("Enter User ID") - - # Check if the "Predict" button was clicked - if st.button("Predict"): - - # Call the predict method of the MarkovChainClassifier instance - pred = model.predict() - if pred == 'F': - st.write(f"UserID: {user_id}, Prediction: Visitor is unlikely to convert into a customer.") - else: - st.write(f"UserID: {user_id}, Prediction: Visitor is likely to convert into a customer.") - - # st.subheader("Predict Conversion") - # # Upload ML config file using Streamlit's file_uploader function - # mlf_file = st.file_uploader("Upload ML config file", type=["properties"]) - - # # Check if ML config file was uploaded - # if mlf_file is not None: - # # Save the uploaded file to a local file - # with open("mcclf_cc.properties", "wb") as f: - # f.write(mlf_file.read()) - - # # Create an instance of MarkovChainClassifier with the uploaded ML config file - # model = MarkovChainClassifier("mcclf_cc.properties") - - # # # Load the model from cc.mod - # # model = 
MarkovChainClassifier.load_model("cc.mod") - - # # Get user input for userID - # user_id = st.text_input("Enter User ID") - - # # Check if the "Predict" button was clicked - # if st.button("Predict"): - # # Load the saved model - # # model.load_model("cc.mod") - - # # Call the predict method of the MarkovChainClassifier instance - # pred = model.predict() - # if pred == 'T': - # st.write(f"UserID: {user_id}, Prediction: Visitor is likely to convert into a customer.") - # else: - # st.write(f"UserID: {user_id}, Prediction: Visitor is unlikely to convert into a customer.") - -if __name__ == "__main__": - main() diff --git a/spaces/UdayPrasad/fashion-mnist/app.py b/spaces/UdayPrasad/fashion-mnist/app.py deleted file mode 100644 index dff8ba432b4f5732855e28074381d08aa743dead..0000000000000000000000000000000000000000 --- a/spaces/UdayPrasad/fashion-mnist/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import scipy -import gradio as gr -import numpy as np -import tensorflow as tf -from tensorflow import keras -from keras.models import load_model -import pickle - -def fashion_MNIST_prediction(test_image, model='KNN'): - test_image_flatten = test_image.reshape((-1, 28*28)) - fashion_mnist = keras.datasets.fashion_mnist - (X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() - class_names = ("T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot") - img_shape = X_train.shape - n_samples = img_shape[0] - width = img_shape[1] - height = img_shape[2] - x_train_flatten = X_train.reshape(n_samples, width*height) - if model == 'KNN': - from sklearn.neighbors import KNeighborsClassifier - KNN_classifier = KNeighborsClassifier(n_neighbors=5) - KNN_classifier.fit(x_train_flatten, y_train) - ans = KNN_classifier.predict(test_image_flatten) - ans_prediction = KNN_classifier.predict_proba(test_image_flatten) - return class_names[ans[0]], dict(zip(class_names, map(float,ans_prediction[0]))) - elif model == 'SoftMax': - with open('softmax_model.pkl', 'rb') as file: - softmax_model_loaded = pickle.load(file) - ans = softmax_model_loaded.predict(test_image_flatten) - ans_prediction = softmax_model_loaded.predict_proba(test_image_flatten) - return class_names[ans[0]], dict(zip(class_names, map(float,ans_prediction[0]))) - elif model == 'NeuralNetwork_Shallow': - nn_shallow_loaded_model = load_model("nn_model_1.h5") - ans_prediction = nn_shallow_loaded_model.predict(np.asarray(test_image_flatten)) - ans = np.argmax(ans_prediction) - return class_names[ans], dict(zip(class_names, map(float,ans_prediction[0]))) - else: - nn_deep_loaded_model = load_model("deep_nn_model_1.h5") - ans_prediction = nn_deep_loaded_model.predict(np.asarray(test_image_flatten)) - ans = np.argmax(ans_prediction) - return class_names[ans], dict(zip(class_names, map(float,ans_prediction[0]))) - return class_names[0] - -input_image = gr.inputs.Image(shape=(28, 28), image_mode='L') -input_model = gr.inputs.Dropdown(['KNN', 'SoftMax', 'NeuralNetwork_Shallow', 'NeuralNetwork_Deep']) - -output_label = gr.outputs.Textbox(label="Predicted Label") -output_probability = gr.outputs.Label(num_top_classes=10, label="Predicted Probability Per Class") - -gr.Interface(fn=fashion_MNIST_prediction, - inputs = [input_image, input_model], - outputs = [output_label, output_probability], - title = "Fashion MNIST classification", - ).launch(debug=True) \ No newline at end of file diff --git a/spaces/Uppuluri/mychatbotai/README.md b/spaces/Uppuluri/mychatbotai/README.md deleted file mode 100644 
index b76cdeed39420e3436a2809fc2fbd5433cb00ca4..0000000000000000000000000000000000000000 --- a/spaces/Uppuluri/mychatbotai/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mychatbotai -emoji: 👁 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py deleted file mode 100644 index 1c66194deb5dd370e797e57e2712f44303e568cc..0000000000000000000000000000000000000000 --- a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py +++ /dev/null @@ -1,802 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# DINO -# Copyright (c) 2022 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# -------------------------------------------------------- -# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py -# -------------------------------------------------------- - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from groundingdino.util.misc import NestedTensor - - -class Mlp(nn.Module): - """Multilayer perceptron.""" - - def __init__( - self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. 
- Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__( - self, - dim, - window_size, - num_heads, - qkv_bias=True, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) - ) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=0.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """Forward function. - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B_, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = q @ k.transpose(-2, -1) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1) - ].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 - ) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1 - ).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """Swin Transformer Block. - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. 
- shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__( - self, - dim, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - norm_layer=nn.LayerNorm, - ): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop - ) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. - """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition( - shifted_x, self.window_size - ) # nW*B, window_size, window_size, C - x_windows = x_windows.view( - -1, self.window_size * self.window_size, C - ) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """Patch Merging Layer - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm - """ - - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False, - ): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList( - [ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer, - ) - for i in range(depth) - ] - ) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - w_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition( - img_mask, self.window_size - ) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( - attn_mask == 0, float(0.0) - ) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """Image to Patch Embedding - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -class SwinTransformer(nn.Module): - """Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. 
- attn_drop_rate (float): Attention dropout rate. Default: 0. - drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - dilation (bool): if True, the output size if 16x downsample, ow 32x downsample. - """ - - def __init__( - self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - dilation=False, - use_checkpoint=False, - ): - super().__init__() - - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.dilation = dilation - - # if use_checkpoint: - # print("use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!") - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None, - ) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [ - pretrain_img_size[0] // patch_size[0], - pretrain_img_size[1] // patch_size[1], - ] - - self.absolute_pos_embed = nn.Parameter( - torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) - ) - trunc_normal_(self.absolute_pos_embed, std=0.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) - ] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - # prepare downsample list - downsamplelist = [PatchMerging for i in range(self.num_layers)] - downsamplelist[-1] = None - num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)] - if self.dilation: - downsamplelist[-2] = None - num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2 - for i_layer in range(self.num_layers): - layer = BasicLayer( - # dim=int(embed_dim * 2 ** i_layer), - dim=num_features[i_layer], - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], - norm_layer=norm_layer, - # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - downsample=downsamplelist[i_layer], - use_checkpoint=use_checkpoint, - ) - self.layers.append(layer) - - # num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f"norm{i_layer}" - 
self.add_module(layer_name, layer) - - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - # def init_weights(self, pretrained=None): - # """Initialize the weights in backbone. - # Args: - # pretrained (str, optional): Path to pre-trained weights. - # Defaults to None. - # """ - - # def _init_weights(m): - # if isinstance(m, nn.Linear): - # trunc_normal_(m.weight, std=.02) - # if isinstance(m, nn.Linear) and m.bias is not None: - # nn.init.constant_(m.bias, 0) - # elif isinstance(m, nn.LayerNorm): - # nn.init.constant_(m.bias, 0) - # nn.init.constant_(m.weight, 1.0) - - # if isinstance(pretrained, str): - # self.apply(_init_weights) - # logger = get_root_logger() - # load_checkpoint(self, pretrained, strict=False, logger=logger) - # elif pretrained is None: - # self.apply(_init_weights) - # else: - # raise TypeError('pretrained must be a str or None') - - def forward_raw(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = [] - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - # import ipdb; ipdb.set_trace() - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs.append(out) - # in: - # torch.Size([2, 3, 1024, 1024]) - # outs: - # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \ - # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])] - return tuple(outs) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = [] - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs.append(out) - # in: - # torch.Size([2, 3, 1024, 1024]) - # out: - # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \ - # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])] - - # collect for nesttensors - outs_dict = {} - for idx, out_i in enumerate(outs): - m = tensor_list.mask - assert m is not None - mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0] - outs_dict[idx] = NestedTensor(out_i, mask) - - return outs_dict - - 
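(Editor's note, an illustration only and not part of the deleted file.) The SwinTransformerBlock above pads its feature map to a multiple of window_size before calling window_partition, precisely so that window_reverse can undo the partition exactly. A minimal sketch of that round trip, using the two helpers defined earlier in this same file and assuming a hypothetical 14x14 feature map with window_size=7:

    import torch

    x = torch.randn(2, 14, 14, 96)                  # (B, H, W, C); H and W divisible by window_size
    windows = window_partition(x, 7)                # (num_windows * B, 7, 7, C) = (8, 7, 7, 96)
    assert windows.shape == (8, 7, 7, 96)
    # pure reshape/permute, so the reverse is an exact reconstruction
    assert torch.equal(window_reverse(windows, 7, 14, 14), x)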
def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() - - -def build_swin_transformer(modelname, pretrain_img_size, **kw): - assert modelname in [ - "swin_T_224_1k", - "swin_B_224_22k", - "swin_B_384_22k", - "swin_L_224_22k", - "swin_L_384_22k", - ] - - model_para_dict = { - "swin_T_224_1k": dict( - embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7 - ), - "swin_B_224_22k": dict( - embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7 - ), - "swin_B_384_22k": dict( - embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12 - ), - "swin_L_224_22k": dict( - embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7 - ), - "swin_L_384_22k": dict( - embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12 - ), - } - kw_cgf = model_para_dict[modelname] - kw_cgf.update(kw) - model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cgf) - return model - - -if __name__ == "__main__": - model = build_swin_transformer("swin_L_384_22k", 384, dilation=True) - x = torch.rand(2, 3, 1024, 1024) - y = model.forward_raw(x) - import ipdb - - ipdb.set_trace() - x = torch.rand(2, 3, 384, 384) - y = model.forward_raw(x) diff --git a/spaces/Weyaxi/open-llm-leaderboard-renamer/README.md b/spaces/Weyaxi/open-llm-leaderboard-renamer/README.md deleted file mode 100644 index ad9e199fcb2fdf211518eeddc6d55b8c90033022..0000000000000000000000000000000000000000 --- a/spaces/Weyaxi/open-llm-leaderboard-renamer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Open LLM Leaderboard Renamer -emoji: 🔀 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/WindVChen/INR-Harmon/hrnet_ocr.py b/spaces/WindVChen/INR-Harmon/hrnet_ocr.py deleted file mode 100644 index c01a2c1c86fad7978d39fed3bd2cc78bcd6e49c7..0000000000000000000000000000000000000000 --- a/spaces/WindVChen/INR-Harmon/hrnet_ocr.py +++ /dev/null @@ -1,400 +0,0 @@ -import os -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch._utils - -from .ocr import SpatialOCR_Module, SpatialGather_Module -from .resnetv1b import BasicBlockV1b, BottleneckV1b - -relu_inplace = True - - -class HighResolutionModule(nn.Module): - def __init__(self, num_branches, blocks, num_blocks, num_inchannels, - num_channels, fuse_method,multi_scale_output=True, - norm_layer=nn.BatchNorm2d, align_corners=True): - super(HighResolutionModule, self).__init__() - self._check_branches(num_branches, num_blocks, num_inchannels, num_channels) - - self.num_inchannels = num_inchannels - self.fuse_method = fuse_method - self.num_branches = num_branches - self.norm_layer = norm_layer - self.align_corners = align_corners - - self.multi_scale_output = multi_scale_output - - self.branches = self._make_branches( - num_branches, blocks, num_blocks, num_channels) - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU(inplace=relu_inplace) - - def _check_branches(self, num_branches, num_blocks, num_inchannels, num_channels): - if num_branches != len(num_blocks): - error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( - num_branches, len(num_blocks)) - raise ValueError(error_msg) - - if num_branches != len(num_channels): - 
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( - num_branches, len(num_channels)) - raise ValueError(error_msg) - - if num_branches != len(num_inchannels): - error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( - num_branches, len(num_inchannels)) - raise ValueError(error_msg) - - def _make_one_branch(self, branch_index, block, num_blocks, num_channels, - stride=1): - downsample = None - if stride != 1 or \ - self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.num_inchannels[branch_index], - num_channels[branch_index] * block.expansion, - kernel_size=1, stride=stride, bias=False), - self.norm_layer(num_channels[branch_index] * block.expansion), - ) - - layers = [] - layers.append(block(self.num_inchannels[branch_index], - num_channels[branch_index], stride, - downsample=downsample, norm_layer=self.norm_layer)) - self.num_inchannels[branch_index] = \ - num_channels[branch_index] * block.expansion - for i in range(1, num_blocks[branch_index]): - layers.append(block(self.num_inchannels[branch_index], - num_channels[branch_index], - norm_layer=self.norm_layer)) - - return nn.Sequential(*layers) - - def _make_branches(self, num_branches, block, num_blocks, num_channels): - branches = [] - - for i in range(num_branches): - branches.append( - self._make_one_branch(i, block, num_blocks, num_channels)) - - return nn.ModuleList(branches) - - def _make_fuse_layers(self): - if self.num_branches == 1: - return None - - num_branches = self.num_branches - num_inchannels = self.num_inchannels - fuse_layers = [] - for i in range(num_branches if self.multi_scale_output else 1): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append(nn.Sequential( - nn.Conv2d(in_channels=num_inchannels[j], - out_channels=num_inchannels[i], - kernel_size=1, - bias=False), - self.norm_layer(num_inchannels[i]))) - elif j == i: - fuse_layer.append(None) - else: - conv3x3s = [] - for k in range(i - j): - if k == i - j - 1: - num_outchannels_conv3x3 = num_inchannels[i] - conv3x3s.append(nn.Sequential( - nn.Conv2d(num_inchannels[j], - num_outchannels_conv3x3, - kernel_size=3, stride=2, padding=1, bias=False), - self.norm_layer(num_outchannels_conv3x3))) - else: - num_outchannels_conv3x3 = num_inchannels[j] - conv3x3s.append(nn.Sequential( - nn.Conv2d(num_inchannels[j], - num_outchannels_conv3x3, - kernel_size=3, stride=2, padding=1, bias=False), - self.norm_layer(num_outchannels_conv3x3), - nn.ReLU(inplace=relu_inplace))) - fuse_layer.append(nn.Sequential(*conv3x3s)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def get_num_inchannels(self): - return self.num_inchannels - - def forward(self, x): - if self.num_branches == 1: - return [self.branches[0](x[0])] - - for i in range(self.num_branches): - x[i] = self.branches[i](x[i]) - - x_fuse = [] - for i in range(len(self.fuse_layers)): - y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) - for j in range(1, self.num_branches): - if i == j: - y = y + x[j] - elif j > i: - width_output = x[i].shape[-1] - height_output = x[i].shape[-2] - y = y + F.interpolate( - self.fuse_layers[i][j](x[j]), - size=[height_output, width_output], - mode='bilinear', align_corners=self.align_corners) - else: - y = y + self.fuse_layers[i][j](x[j]) - x_fuse.append(self.relu(y)) - - return x_fuse - - -class HighResolutionNet(nn.Module): - def __init__(self, width, num_classes, ocr_width=256, small=False, - norm_layer=nn.BatchNorm2d, 
align_corners=True, opt=None): - super(HighResolutionNet, self).__init__() - self.opt = opt - self.norm_layer = norm_layer - self.width = width - self.ocr_width = ocr_width - self.ocr_on = ocr_width > 0 - self.align_corners = align_corners - - self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) - self.bn1 = norm_layer(64) - self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) - self.bn2 = norm_layer(64) - self.relu = nn.ReLU(inplace=relu_inplace) - - num_blocks = 2 if small else 4 - - stage1_num_channels = 64 - self.layer1 = self._make_layer(BottleneckV1b, 64, stage1_num_channels, blocks=num_blocks) - stage1_out_channel = BottleneckV1b.expansion * stage1_num_channels - - self.stage2_num_branches = 2 - num_channels = [width, 2 * width] - num_inchannels = [ - num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))] - self.transition1 = self._make_transition_layer( - [stage1_out_channel], num_inchannels) - self.stage2, pre_stage_channels = self._make_stage( - BasicBlockV1b, num_inchannels=num_inchannels, num_modules=1, num_branches=self.stage2_num_branches, - num_blocks=2 * [num_blocks], num_channels=num_channels) - - self.stage3_num_branches = 3 - num_channels = [width, 2 * width, 4 * width] - num_inchannels = [ - num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))] - self.transition2 = self._make_transition_layer( - pre_stage_channels, num_inchannels) - self.stage3, pre_stage_channels = self._make_stage( - BasicBlockV1b, num_inchannels=num_inchannels, - num_modules=3 if small else 4, num_branches=self.stage3_num_branches, - num_blocks=3 * [num_blocks], num_channels=num_channels) - - self.stage4_num_branches = 4 - num_channels = [width, 2 * width, 4 * width, 8 * width] - num_inchannels = [ - num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))] - self.transition3 = self._make_transition_layer( - pre_stage_channels, num_inchannels) - self.stage4, pre_stage_channels = self._make_stage( - BasicBlockV1b, num_inchannels=num_inchannels, num_modules=2 if small else 3, - num_branches=self.stage4_num_branches, - num_blocks=4 * [num_blocks], num_channels=num_channels) - - if self.ocr_on: - last_inp_channels = np.int(np.sum(pre_stage_channels)) - ocr_mid_channels = 2 * ocr_width - ocr_key_channels = ocr_width - - self.conv3x3_ocr = nn.Sequential( - nn.Conv2d(last_inp_channels, ocr_mid_channels, - kernel_size=3, stride=1, padding=1), - norm_layer(ocr_mid_channels), - nn.ReLU(inplace=relu_inplace), - ) - self.ocr_gather_head = SpatialGather_Module(num_classes) - - self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels, - key_channels=ocr_key_channels, - out_channels=ocr_mid_channels, - scale=1, - dropout=0.05, - norm_layer=norm_layer, - align_corners=align_corners, opt=opt) - - def _make_transition_layer( - self, num_channels_pre_layer, num_channels_cur_layer): - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append(nn.Sequential( - nn.Conv2d(num_channels_pre_layer[i], - num_channels_cur_layer[i], - kernel_size=3, - stride=1, - padding=1, - bias=False), - self.norm_layer(num_channels_cur_layer[i]), - nn.ReLU(inplace=relu_inplace))) - else: - transition_layers.append(None) - else: - conv3x3s = [] - for j in range(i + 1 - num_branches_pre): - inchannels = 
num_channels_pre_layer[-1] - outchannels = num_channels_cur_layer[i] \ - if j == i - num_branches_pre else inchannels - conv3x3s.append(nn.Sequential( - nn.Conv2d(inchannels, outchannels, - kernel_size=3, stride=2, padding=1, bias=False), - self.norm_layer(outchannels), - nn.ReLU(inplace=relu_inplace))) - transition_layers.append(nn.Sequential(*conv3x3s)) - - return nn.ModuleList(transition_layers) - - def _make_layer(self, block, inplanes, planes, blocks, stride=1): - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - self.norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(inplanes, planes, stride, - downsample=downsample, norm_layer=self.norm_layer)) - inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(inplanes, planes, norm_layer=self.norm_layer)) - - return nn.Sequential(*layers) - - def _make_stage(self, block, num_inchannels, - num_modules, num_branches, num_blocks, num_channels, - fuse_method='SUM', - multi_scale_output=True): - modules = [] - for i in range(num_modules): - # multi_scale_output is only used last module - if not multi_scale_output and i == num_modules - 1: - reset_multi_scale_output = False - else: - reset_multi_scale_output = True - modules.append( - HighResolutionModule(num_branches, - block, - num_blocks, - num_inchannels, - num_channels, - fuse_method, - reset_multi_scale_output, - norm_layer=self.norm_layer, - align_corners=self.align_corners) - ) - num_inchannels = modules[-1].get_num_inchannels() - - return nn.Sequential(*modules), num_inchannels - - def forward(self, x, mask=None, additional_features=None): - hrnet_feats = self.compute_hrnet_feats(x, additional_features) - if not self.ocr_on: - return hrnet_feats, - - ocr_feats = self.conv3x3_ocr(hrnet_feats) - mask = nn.functional.interpolate(mask, size=ocr_feats.size()[2:], mode='bilinear', align_corners=True) - context = self.ocr_gather_head(ocr_feats, mask) - ocr_feats = self.ocr_distri_head(ocr_feats, context) - return ocr_feats, - - def compute_hrnet_feats(self, x, additional_features, return_list=False): - x = self.compute_pre_stage_features(x, additional_features) - x = self.layer1(x) - - x_list = [] - for i in range(self.stage2_num_branches): - if self.transition1[i] is not None: - x_list.append(self.transition1[i](x)) - else: - x_list.append(x) - y_list = self.stage2(x_list) - - x_list = [] - for i in range(self.stage3_num_branches): - if self.transition2[i] is not None: - if i < self.stage2_num_branches: - x_list.append(self.transition2[i](y_list[i])) - else: - x_list.append(self.transition2[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage3(x_list) - - x_list = [] - for i in range(self.stage4_num_branches): - if self.transition3[i] is not None: - if i < self.stage3_num_branches: - x_list.append(self.transition3[i](y_list[i])) - else: - x_list.append(self.transition3[i](y_list[-1])) - else: - x_list.append(y_list[i]) - x = self.stage4(x_list) - - if return_list: - return x - - # Upsampling - x0_h, x0_w = x[0].size(2), x[0].size(3) - x1 = F.interpolate(x[1], size=(x0_h, x0_w), - mode='bilinear', align_corners=self.align_corners) - x2 = F.interpolate(x[2], size=(x0_h, x0_w), - mode='bilinear', align_corners=self.align_corners) - x3 = F.interpolate(x[3], size=(x0_h, x0_w), - mode='bilinear', align_corners=self.align_corners) - - return torch.cat([x[0], x1, x2, x3], 1) 
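(Editor's note, an illustration only and not part of the deleted file.) compute_hrnet_feats above upsamples the three lower-resolution branches to the stride-4 resolution of branch 0 and concatenates along channels, so the fused map has w + 2w + 4w + 8w = 15w channels, which is what np.sum(pre_stage_channels) evaluates to before conv3x3_ocr is built. A minimal sanity check, assuming a hypothetical width of 18 and arbitrary spatial sizes:

    import torch
    import torch.nn.functional as F

    w = 18                                               # hypothetical width (e.g. an HRNetV2-W18 setup)
    channels = [w, 2 * w, 4 * w, 8 * w]                  # pre_stage_channels with BasicBlockV1b (expansion=1)
    feats = [torch.randn(1, c, s, s) for c, s in zip(channels, [64, 32, 16, 8])]
    upsampled = [F.interpolate(f, size=feats[0].shape[2:], mode='bilinear', align_corners=True)
                 for f in feats[1:]]
    fused = torch.cat([feats[0]] + upsampled, dim=1)
    assert fused.shape[1] == sum(channels) == 15 * w     # 270 channels feed conv3x3_ocr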
- - def compute_pre_stage_features(self, x, additional_features): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - if additional_features is not None: - x = x + additional_features - x = self.conv2(x) - x = self.bn2(x) - return self.relu(x) - - def load_pretrained_weights(self, pretrained_path=''): - model_dict = self.state_dict() - - if not os.path.exists(pretrained_path): - print(f'\nFile "{pretrained_path}" does not exist.') - print('You need to specify the correct path to the pre-trained weights.\n' - 'You can download the weights for HRNet from the repository:\n' - 'https://github.com/HRNet/HRNet-Image-Classification') - exit(1) - pretrained_dict = torch.load(pretrained_path, map_location={'cuda:0': 'cpu'}) - pretrained_dict = {k.replace('last_layer', 'aux_head').replace('model.', ''): v for k, v in - pretrained_dict.items()} - params_count = len(pretrained_dict) - - pretrained_dict = {k: v for k, v in pretrained_dict.items() - if k in model_dict.keys()} - - # print(f'Loaded {len(pretrained_dict)} of {params_count} pretrained parameters for HRNet') - - model_dict.update(pretrained_dict) - self.load_state_dict(model_dict) diff --git a/spaces/WindVChen/INR-Harmon/model/hrnetv2/hrnet_ocr.py b/spaces/WindVChen/INR-Harmon/model/hrnetv2/hrnet_ocr.py deleted file mode 100644 index 7a7e98a03be0f9c8a44b5c171d4e64704af77d82..0000000000000000000000000000000000000000 --- a/spaces/WindVChen/INR-Harmon/model/hrnetv2/hrnet_ocr.py +++ /dev/null @@ -1,400 +0,0 @@ -import os -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch._utils - -from .ocr import SpatialOCR_Module, SpatialGather_Module -from .resnetv1b import BasicBlockV1b, BottleneckV1b - -relu_inplace = True - - -class HighResolutionModule(nn.Module): - def __init__(self, num_branches, blocks, num_blocks, num_inchannels, - num_channels, fuse_method,multi_scale_output=True, - norm_layer=nn.BatchNorm2d, align_corners=True): - super(HighResolutionModule, self).__init__() - self._check_branches(num_branches, num_blocks, num_inchannels, num_channels) - - self.num_inchannels = num_inchannels - self.fuse_method = fuse_method - self.num_branches = num_branches - self.norm_layer = norm_layer - self.align_corners = align_corners - - self.multi_scale_output = multi_scale_output - - self.branches = self._make_branches( - num_branches, blocks, num_blocks, num_channels) - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU(inplace=relu_inplace) - - def _check_branches(self, num_branches, num_blocks, num_inchannels, num_channels): - if num_branches != len(num_blocks): - error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( - num_branches, len(num_blocks)) - raise ValueError(error_msg) - - if num_branches != len(num_channels): - error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( - num_branches, len(num_channels)) - raise ValueError(error_msg) - - if num_branches != len(num_inchannels): - error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( - num_branches, len(num_inchannels)) - raise ValueError(error_msg) - - def _make_one_branch(self, branch_index, block, num_blocks, num_channels, - stride=1): - downsample = None - if stride != 1 or \ - self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.num_inchannels[branch_index], - num_channels[branch_index] * block.expansion, - kernel_size=1, stride=stride, bias=False), - self.norm_layer(num_channels[branch_index] * block.expansion), - ) - - layers = 
[] - layers.append(block(self.num_inchannels[branch_index], - num_channels[branch_index], stride, - downsample=downsample, norm_layer=self.norm_layer)) - self.num_inchannels[branch_index] = \ - num_channels[branch_index] * block.expansion - for i in range(1, num_blocks[branch_index]): - layers.append(block(self.num_inchannels[branch_index], - num_channels[branch_index], - norm_layer=self.norm_layer)) - - return nn.Sequential(*layers) - - def _make_branches(self, num_branches, block, num_blocks, num_channels): - branches = [] - - for i in range(num_branches): - branches.append( - self._make_one_branch(i, block, num_blocks, num_channels)) - - return nn.ModuleList(branches) - - def _make_fuse_layers(self): - if self.num_branches == 1: - return None - - num_branches = self.num_branches - num_inchannels = self.num_inchannels - fuse_layers = [] - for i in range(num_branches if self.multi_scale_output else 1): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append(nn.Sequential( - nn.Conv2d(in_channels=num_inchannels[j], - out_channels=num_inchannels[i], - kernel_size=1, - bias=False), - self.norm_layer(num_inchannels[i]))) - elif j == i: - fuse_layer.append(None) - else: - conv3x3s = [] - for k in range(i - j): - if k == i - j - 1: - num_outchannels_conv3x3 = num_inchannels[i] - conv3x3s.append(nn.Sequential( - nn.Conv2d(num_inchannels[j], - num_outchannels_conv3x3, - kernel_size=3, stride=2, padding=1, bias=False), - self.norm_layer(num_outchannels_conv3x3))) - else: - num_outchannels_conv3x3 = num_inchannels[j] - conv3x3s.append(nn.Sequential( - nn.Conv2d(num_inchannels[j], - num_outchannels_conv3x3, - kernel_size=3, stride=2, padding=1, bias=False), - self.norm_layer(num_outchannels_conv3x3), - nn.ReLU(inplace=relu_inplace))) - fuse_layer.append(nn.Sequential(*conv3x3s)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def get_num_inchannels(self): - return self.num_inchannels - - def forward(self, x): - if self.num_branches == 1: - return [self.branches[0](x[0])] - - for i in range(self.num_branches): - x[i] = self.branches[i](x[i]) - - x_fuse = [] - for i in range(len(self.fuse_layers)): - y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) - for j in range(1, self.num_branches): - if i == j: - y = y + x[j] - elif j > i: - width_output = x[i].shape[-1] - height_output = x[i].shape[-2] - y = y + F.interpolate( - self.fuse_layers[i][j](x[j]), - size=[height_output, width_output], - mode='bilinear', align_corners=self.align_corners) - else: - y = y + self.fuse_layers[i][j](x[j]) - x_fuse.append(self.relu(y)) - - return x_fuse - - -class HighResolutionNet(nn.Module): - def __init__(self, width, num_classes, ocr_width=256, small=False, - norm_layer=nn.BatchNorm2d, align_corners=True, opt=None): - super(HighResolutionNet, self).__init__() - self.opt = opt - self.norm_layer = norm_layer - self.width = width - self.ocr_width = ocr_width - self.ocr_on = ocr_width > 0 - self.align_corners = align_corners - - self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) - self.bn1 = norm_layer(64) - self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) - self.bn2 = norm_layer(64) - self.relu = nn.ReLU(inplace=relu_inplace) - - num_blocks = 2 if small else 4 - - stage1_num_channels = 64 - self.layer1 = self._make_layer(BottleneckV1b, 64, stage1_num_channels, blocks=num_blocks) - stage1_out_channel = BottleneckV1b.expansion * stage1_num_channels - - self.stage2_num_branches = 2 - num_channels = 
[width, 2 * width] - num_inchannels = [ - num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))] - self.transition1 = self._make_transition_layer( - [stage1_out_channel], num_inchannels) - self.stage2, pre_stage_channels = self._make_stage( - BasicBlockV1b, num_inchannels=num_inchannels, num_modules=1, num_branches=self.stage2_num_branches, - num_blocks=2 * [num_blocks], num_channels=num_channels) - - self.stage3_num_branches = 3 - num_channels = [width, 2 * width, 4 * width] - num_inchannels = [ - num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))] - self.transition2 = self._make_transition_layer( - pre_stage_channels, num_inchannels) - self.stage3, pre_stage_channels = self._make_stage( - BasicBlockV1b, num_inchannels=num_inchannels, - num_modules=3 if small else 4, num_branches=self.stage3_num_branches, - num_blocks=3 * [num_blocks], num_channels=num_channels) - - self.stage4_num_branches = 4 - num_channels = [width, 2 * width, 4 * width, 8 * width] - num_inchannels = [ - num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))] - self.transition3 = self._make_transition_layer( - pre_stage_channels, num_inchannels) - self.stage4, pre_stage_channels = self._make_stage( - BasicBlockV1b, num_inchannels=num_inchannels, num_modules=2 if small else 3, - num_branches=self.stage4_num_branches, - num_blocks=4 * [num_blocks], num_channels=num_channels) - - if self.ocr_on: - last_inp_channels = np.int(np.sum(pre_stage_channels)) - ocr_mid_channels = 2 * ocr_width - ocr_key_channels = ocr_width - - self.conv3x3_ocr = nn.Sequential( - nn.Conv2d(last_inp_channels, ocr_mid_channels, - kernel_size=3, stride=1, padding=1), - norm_layer(ocr_mid_channels), - nn.ReLU(inplace=relu_inplace), - ) - self.ocr_gather_head = SpatialGather_Module(num_classes) - - self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels, - key_channels=ocr_key_channels, - out_channels=ocr_mid_channels, - scale=1, - dropout=0.05, - norm_layer=norm_layer, - align_corners=align_corners, opt=opt) - - def _make_transition_layer( - self, num_channels_pre_layer, num_channels_cur_layer): - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append(nn.Sequential( - nn.Conv2d(num_channels_pre_layer[i], - num_channels_cur_layer[i], - kernel_size=3, - stride=1, - padding=1, - bias=False), - self.norm_layer(num_channels_cur_layer[i]), - nn.ReLU(inplace=relu_inplace))) - else: - transition_layers.append(None) - else: - conv3x3s = [] - for j in range(i + 1 - num_branches_pre): - inchannels = num_channels_pre_layer[-1] - outchannels = num_channels_cur_layer[i] \ - if j == i - num_branches_pre else inchannels - conv3x3s.append(nn.Sequential( - nn.Conv2d(inchannels, outchannels, - kernel_size=3, stride=2, padding=1, bias=False), - self.norm_layer(outchannels), - nn.ReLU(inplace=relu_inplace))) - transition_layers.append(nn.Sequential(*conv3x3s)) - - return nn.ModuleList(transition_layers) - - def _make_layer(self, block, inplanes, planes, blocks, stride=1): - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - self.norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(inplanes, planes, 
stride, - downsample=downsample, norm_layer=self.norm_layer)) - inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(inplanes, planes, norm_layer=self.norm_layer)) - - return nn.Sequential(*layers) - - def _make_stage(self, block, num_inchannels, - num_modules, num_branches, num_blocks, num_channels, - fuse_method='SUM', - multi_scale_output=True): - modules = [] - for i in range(num_modules): - # multi_scale_output is only used last module - if not multi_scale_output and i == num_modules - 1: - reset_multi_scale_output = False - else: - reset_multi_scale_output = True - modules.append( - HighResolutionModule(num_branches, - block, - num_blocks, - num_inchannels, - num_channels, - fuse_method, - reset_multi_scale_output, - norm_layer=self.norm_layer, - align_corners=self.align_corners) - ) - num_inchannels = modules[-1].get_num_inchannels() - - return nn.Sequential(*modules), num_inchannels - - def forward(self, x, mask=None, additional_features=None): - hrnet_feats = self.compute_hrnet_feats(x, additional_features) - if not self.ocr_on: - return hrnet_feats, - - ocr_feats = self.conv3x3_ocr(hrnet_feats) - mask = nn.functional.interpolate(mask, size=ocr_feats.size()[2:], mode='bilinear', align_corners=True) - context = self.ocr_gather_head(ocr_feats, mask) - ocr_feats = self.ocr_distri_head(ocr_feats, context) - return ocr_feats, - - def compute_hrnet_feats(self, x, additional_features, return_list=False): - x = self.compute_pre_stage_features(x, additional_features) - x = self.layer1(x) - - x_list = [] - for i in range(self.stage2_num_branches): - if self.transition1[i] is not None: - x_list.append(self.transition1[i](x)) - else: - x_list.append(x) - y_list = self.stage2(x_list) - - x_list = [] - for i in range(self.stage3_num_branches): - if self.transition2[i] is not None: - if i < self.stage2_num_branches: - x_list.append(self.transition2[i](y_list[i])) - else: - x_list.append(self.transition2[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage3(x_list) - - x_list = [] - for i in range(self.stage4_num_branches): - if self.transition3[i] is not None: - if i < self.stage3_num_branches: - x_list.append(self.transition3[i](y_list[i])) - else: - x_list.append(self.transition3[i](y_list[-1])) - else: - x_list.append(y_list[i]) - x = self.stage4(x_list) - - if return_list: - return x - - # Upsampling - x0_h, x0_w = x[0].size(2), x[0].size(3) - x1 = F.interpolate(x[1], size=(x0_h, x0_w), - mode='bilinear', align_corners=self.align_corners) - x2 = F.interpolate(x[2], size=(x0_h, x0_w), - mode='bilinear', align_corners=self.align_corners) - x3 = F.interpolate(x[3], size=(x0_h, x0_w), - mode='bilinear', align_corners=self.align_corners) - - return torch.cat([x[0], x1, x2, x3], 1) - - def compute_pre_stage_features(self, x, additional_features): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - if additional_features is not None: - x = x + additional_features - x = self.conv2(x) - x = self.bn2(x) - return self.relu(x) - - def load_pretrained_weights(self, pretrained_path=''): - model_dict = self.state_dict() - - if not os.path.exists(pretrained_path): - print(f'\nFile "{pretrained_path}" does not exist.') - print('You need to specify the correct path to the pre-trained weights.\n' - 'You can download the weights for HRNet from the repository:\n' - 'https://github.com/HRNet/HRNet-Image-Classification') - exit(1) - pretrained_dict = torch.load(pretrained_path, map_location={'cuda:0': 'cpu'}) - pretrained_dict = {k.replace('last_layer', 
'aux_head').replace('model.', ''): v for k, v in - pretrained_dict.items()} - params_count = len(pretrained_dict) - - pretrained_dict = {k: v for k, v in pretrained_dict.items() - if k in model_dict.keys()} - - print(f'Loaded {len(pretrained_dict)} of {params_count} pretrained parameters for HRNet') - - model_dict.update(pretrained_dict) - self.load_state_dict(model_dict) diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/loss_metrics.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/loss_metrics.py deleted file mode 100644 index 2d623f24513735a64fc6a5249c806a73f83fd812..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/loss_metrics.py +++ /dev/null @@ -1,34 +0,0 @@ -from ..torch_core import * -from ..callback import * -from ..basic_train import Learner, LearnerCallback - -__all__ = ['LossMetrics'] - -class LossMetrics(LearnerCallback): - "Add `loss_func.metrics` to metrics named by `loss_func.metric_names`" - _order = -20 #Needs to run before the recorder - - def on_train_begin(self, **kwargs): - "Add the metrics names to the `Recorder`." - self.names = ifnone(self.learn.loss_func.metric_names, []) - if not self.names: warn('LossMetrics requested but no loss_func.metric_names provided') - self.learn.recorder.add_metric_names(self.names) - - def on_epoch_begin(self, **kwargs): - "Initialize the metrics for this epoch." - self.metrics = {name:0. for name in self.names} - self.nums = 0 - - def on_batch_end(self, last_target, train, **kwargs): - "Update the metrics if not `train`" - if train: return - bs = last_target.size(0) - for name in self.names: - self.metrics[name] += bs * self.learn.loss_func.metrics[name].detach().cpu() - self.nums += bs - - def on_epoch_end(self, last_metrics, **kwargs): - "Finish the computation and sends the result to the Recorder." 
- if not self.nums: return - metrics = [self.metrics[name]/self.nums for name in self.names] - return {'last_metrics': last_metrics+metrics} diff --git a/spaces/Xenova/semantic-image-search/src/app/globals.css b/spaces/Xenova/semantic-image-search/src/app/globals.css deleted file mode 100644 index e76e819792f5447d626ae577b5467617601bd1af..0000000000000000000000000000000000000000 --- a/spaces/Xenova/semantic-image-search/src/app/globals.css +++ /dev/null @@ -1,19 +0,0 @@ -@tailwind base; -@tailwind components; -@tailwind utilities; - -:root { - --foreground-rgb: 255, 255, 255; - --background-start-rgb: 0, 0, 0; - --background-end-rgb: 0, 0, 0; -} - -body { - color: rgb(var(--foreground-rgb)); - background: linear-gradient( - to bottom, - transparent, - rgb(var(--background-end-rgb)) - ) - rgb(var(--background-start-rgb)); -} diff --git a/spaces/YUANAI/DiffspeechResearch/modules/vocoder/hifigan/mel_utils.py b/spaces/YUANAI/DiffspeechResearch/modules/vocoder/hifigan/mel_utils.py deleted file mode 100644 index a75fce72db54812320bc60aedfdd378ccecb3374..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/modules/vocoder/hifigan/mel_utils.py +++ /dev/null @@ -1,80 +0,0 @@ -import numpy as np -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, hparams, center=False, complex=False): - # hop_size: 512 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate) - # win_size: 2048 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate) - # fmin: 55 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) - # fmax: 10000 # To be increased/reduced depending on data. - # fft_size: 2048 # Extra window size is filled with 0 paddings to match this parameter - # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, - n_fft = hparams['fft_size'] - num_mels = hparams['audio_num_mel_bins'] - sampling_rate = hparams['audio_sample_rate'] - hop_size = hparams['hop_size'] - win_size = hparams['win_size'] - fmin = hparams['fmin'] - fmax = hparams['fmax'] - y = y.clamp(min=-1., max=1.) 
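# Editor's note (illustrative only, not part of the original file): `hparams` must carry the
# keys unpacked above. A plausible configuration, echoing the comments at the top of this
# function (the 80 mel bins are an assumption, not stated here):
#   hparams = {'fft_size': 2048, 'audio_num_mel_bins': 80, 'audio_sample_rate': 22050,
#              'hop_size': 512, 'win_size': 2048, 'fmin': 55, 'fmax': 10000}
#   mel = mel_spectrogram(wav, hparams)   # wav: FloatTensor of shape [B, T], values in [-1, 1]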
- global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), [int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)], - mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - if not complex: - spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) - spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec) - spec = spectral_normalize_torch(spec) - else: - B, C, T, _ = spec.shape - spec = spec.transpose(1, 2) # [B, T, n_fft, 2] - return spec diff --git a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/util/visualizer.py b/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/util/visualizer.py deleted file mode 100644 index 7a1b7b101e9b73f75f9136bc67f2063c7c1cf1c1..0000000000000000000000000000000000000000 --- a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/util/visualizer.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@File : visualizer.py -@Time : 2022/04/05 11:39:33 -@Author : Shilong Liu -@Contact : slongliu86@gmail.com -""" - -import datetime -import os - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from matplotlib import transforms -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon -from pycocotools import mask as maskUtils - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class ColorMap: - def __init__(self, basergb=[255, 255, 0]): - self.basergb = np.array(basergb) - - def __call__(self, attnmap): - # attnmap: h, w. np.uint8. - # return: h, w, 4. np.uint8. - assert attnmap.dtype == np.uint8 - h, w = attnmap.shape - res = self.basergb.copy() - res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3 - attn1 = attnmap.copy()[..., None] # h, w, 1 - res = np.concatenate((res, attn1), axis=-1).astype(np.uint8) - return res - - -def rainbow_text(x, y, ls, lc, **kw): - """ - Take a list of strings ``ls`` and colors ``lc`` and place them next to each - other, with text ls[i] being shown in color lc[i]. - - This example shows how to do both vertical and horizontal text, and will - pass all keyword arguments to plt.text, so you can set the font size, - family, etc. 
- """ - t = plt.gca().transData - fig = plt.gcf() - plt.show() - - # horizontal version - for s, c in zip(ls, lc): - text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw) - text.draw(fig.canvas.get_renderer()) - ex = text.get_window_extent() - t = transforms.offset_copy(text._transform, x=ex.width, units="dots") - - # #vertical version - # for s,c in zip(ls,lc): - # text = plt.text(x,y," "+s+" ",color=c, transform=t, - # rotation=90,va='bottom',ha='center',**kw) - # text.draw(fig.canvas.get_renderer()) - # ex = text.get_window_extent() - # t = transforms.offset_copy(text._transform, y=ex.height, units='dots') - - -class COCOVisualizer: - def __init__(self, coco=None, tokenlizer=None) -> None: - self.coco = coco - - def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"): - """ - img: tensor(3, H, W) - tgt: make sure they are all on cpu. - must have items: 'image_id', 'boxes', 'size' - """ - plt.figure(dpi=dpi) - plt.rcParams["font.size"] = "5" - ax = plt.gca() - img = renorm(img).permute(1, 2, 0) - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - ax.imshow(img) - - self.addtgt(tgt) - - if tgt is None: - image_id = 0 - elif "image_id" not in tgt: - image_id = 0 - else: - image_id = tgt["image_id"] - - if caption is None: - savename = "{}/{}-{}.png".format( - savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - else: - savename = "{}/{}-{}-{}.png".format( - savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - print("savename: {}".format(savename)) - os.makedirs(os.path.dirname(savename), exist_ok=True) - plt.savefig(savename) - plt.close() - - def addtgt(self, tgt): - """ """ - if tgt is None or not "boxes" in tgt: - ax = plt.gca() - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - - ax.set_axis_off() - return - - ax = plt.gca() - H, W = tgt["size"] - numbox = tgt["boxes"].shape[0] - - color = [] - polygons = [] - boxes = [] - for box in tgt["boxes"].cpu(): - unnormbbox = box * torch.Tensor([W, H, W, H]) - unnormbbox[:2] -= unnormbbox[2:] / 2 - [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist() - boxes.append([bbox_x, bbox_y, bbox_w, bbox_h]) - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - color.append(c) - - p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1) - ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - - if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0: - assert ( - len(tgt["strings_positive"]) == numbox - ), f"{len(tgt['strings_positive'])} = {numbox}, " - for idx, strlist in enumerate(tgt["strings_positive"]): - cate_id = int(tgt["labels"][idx]) - _string = str(cate_id) + ":" + " ".join(strlist) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "box_label" in tgt: - assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, " - for idx, bl in enumerate(tgt["box_label"]): - _string = str(bl) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, 
bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - # plt.figure() - # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(), - # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black']) - - if "attn" in tgt: - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - if isinstance(tgt["attn"], tuple): - tgt["attn"] = [tgt["attn"]] - for item in tgt["attn"]: - attn_map, basergb = item - attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3) - attn_map = (attn_map * 255).astype(np.uint8) - cm = ColorMap(basergb) - heatmap = cm(attn_map) - ax.imshow(heatmap) - ax.set_axis_off() - - def showAnns(self, anns, draw_bbox=False): - """ - Display the specified annotations. - :param anns (array of object): annotations to display - :return: None - """ - if len(anns) == 0: - return 0 - if "segmentation" in anns[0] or "keypoints" in anns[0]: - datasetType = "instances" - elif "caption" in anns[0]: - datasetType = "captions" - else: - raise Exception("datasetType not supported") - if datasetType == "instances": - ax = plt.gca() - ax.set_autoscale_on(False) - polygons = [] - color = [] - for ann in anns: - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - if "segmentation" in ann: - if type(ann["segmentation"]) == list: - # polygon - for seg in ann["segmentation"]: - poly = np.array(seg).reshape((int(len(seg) / 2), 2)) - polygons.append(Polygon(poly)) - color.append(c) - else: - # mask - t = self.imgs[ann["image_id"]] - if type(ann["segmentation"]["counts"]) == list: - rle = maskUtils.frPyObjects( - [ann["segmentation"]], t["height"], t["width"] - ) - else: - rle = [ann["segmentation"]] - m = maskUtils.decode(rle) - img = np.ones((m.shape[0], m.shape[1], 3)) - if ann["iscrowd"] == 1: - color_mask = np.array([2.0, 166.0, 101.0]) / 255 - if ann["iscrowd"] == 0: - color_mask = np.random.random((1, 3)).tolist()[0] - for i in range(3): - img[:, :, i] = color_mask[i] - ax.imshow(np.dstack((img, m * 0.5))) - if "keypoints" in ann and type(ann["keypoints"]) == list: - # turn skeleton into zero-based index - sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1 - kp = np.array(ann["keypoints"]) - x = kp[0::3] - y = kp[1::3] - v = kp[2::3] - for sk in sks: - if np.all(v[sk] > 0): - plt.plot(x[sk], y[sk], linewidth=3, color=c) - plt.plot( - x[v > 0], - y[v > 0], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor="k", - markeredgewidth=2, - ) - plt.plot( - x[v > 1], - y[v > 1], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor=c, - markeredgewidth=2, - ) - - if draw_bbox: - [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"] - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(c) - - # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) - # ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - elif datasetType == "captions": - for ann in anns: - print(ann["caption"]) diff --git a/spaces/YouLiXiya/Mobile-SAM/segment_anything/segment_anything/modeling/mask_decoder.py 
b/spaces/YouLiXiya/Mobile-SAM/segment_anything/segment_anything/modeling/mask_decoder.py deleted file mode 100644 index c36c7b553c9df986dab91474de06d171feb7f93d..0000000000000000000000000000000000000000 --- a/spaces/YouLiXiya/Mobile-SAM/segment_anything/segment_anything/modeling/mask_decoder.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import nn -from torch.nn import functional as F - -from typing import List, Tuple, Type - -from .common import LayerNorm2d - - -class MaskDecoder(nn.Module): - def __init__( - self, - *, - transformer_dim: int, - transformer: nn.Module, - num_multimask_outputs: int = 3, - activation: Type[nn.Module] = nn.GELU, - iou_head_depth: int = 3, - iou_head_hidden_dim: int = 256, - ) -> None: - """ - Predicts masks given an image and prompt embeddings, using a - transformer architecture. - - Arguments: - transformer_dim (int): the channel dimension of the transformer - transformer (nn.Module): the transformer used to predict masks - num_multimask_outputs (int): the number of masks to predict - when disambiguating masks - activation (nn.Module): the type of activation to use when - upscaling masks - iou_head_depth (int): the depth of the MLP used to predict - mask quality - iou_head_hidden_dim (int): the hidden dimension of the MLP - used to predict mask quality - """ - super().__init__() - self.transformer_dim = transformer_dim - self.transformer = transformer - - self.num_multimask_outputs = num_multimask_outputs - - self.iou_token = nn.Embedding(1, transformer_dim) - self.num_mask_tokens = num_multimask_outputs + 1 - self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) - - self.output_upscaling = nn.Sequential( - nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), - LayerNorm2d(transformer_dim // 4), - activation(), - nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), - activation(), - ) - self.output_hypernetworks_mlps = nn.ModuleList( - [ - MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) - for i in range(self.num_mask_tokens) - ] - ) - - self.iou_prediction_head = MLP( - transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth - ) - - def forward( - self, - image_embeddings: torch.Tensor, - image_pe: torch.Tensor, - sparse_prompt_embeddings: torch.Tensor, - dense_prompt_embeddings: torch.Tensor, - multimask_output: bool, - hq_token_only: bool, - interm_embeddings: torch.Tensor, - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Predict masks given image and prompt embeddings. - - Arguments: - image_embeddings (torch.Tensor): the embeddings from the image encoder - image_pe (torch.Tensor): positional encoding with the shape of image_embeddings - sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes - dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs - multimask_output (bool): Whether to return multiple masks or a single - mask. 
- - Returns: - torch.Tensor: batched predicted masks - torch.Tensor: batched predictions of mask quality - """ - masks, iou_pred = self.predict_masks( - image_embeddings=image_embeddings, - image_pe=image_pe, - sparse_prompt_embeddings=sparse_prompt_embeddings, - dense_prompt_embeddings=dense_prompt_embeddings, - ) - - # Select the correct mask or masks for output - if multimask_output: - mask_slice = slice(1, None) - else: - mask_slice = slice(0, 1) - masks = masks[:, mask_slice, :, :] - iou_pred = iou_pred[:, mask_slice] - - # Prepare output - return masks, iou_pred - - def predict_masks( - self, - image_embeddings: torch.Tensor, - image_pe: torch.Tensor, - sparse_prompt_embeddings: torch.Tensor, - dense_prompt_embeddings: torch.Tensor, - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Predicts masks. See 'forward' for more details.""" - # Concatenate output tokens - output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) - output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) - tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) - - # Expand per-image data in batch direction to be per-mask - src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) - src = src + dense_prompt_embeddings - pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) - b, c, h, w = src.shape - - # Run the transformer - hs, src = self.transformer(src, pos_src, tokens) - iou_token_out = hs[:, 0, :] - mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] - - # Upscale mask embeddings and predict masks using the mask tokens - src = src.transpose(1, 2).view(b, c, h, w) - upscaled_embedding = self.output_upscaling(src) - hyper_in_list: List[torch.Tensor] = [] - for i in range(self.num_mask_tokens): - hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) - hyper_in = torch.stack(hyper_in_list, dim=1) - b, c, h, w = upscaled_embedding.shape - masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) - - # Generate mask quality predictions - iou_pred = self.iou_prediction_head(iou_token_out) - - return masks, iou_pred - - -# Lightly adapted from -# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa -class MLP(nn.Module): - def __init__( - self, - input_dim: int, - hidden_dim: int, - output_dim: int, - num_layers: int, - sigmoid_output: bool = False, - ) -> None: - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - self.sigmoid_output = sigmoid_output - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - if self.sigmoid_output: - x = F.sigmoid(x) - return x \ No newline at end of file diff --git a/spaces/YuAnthony/Audio-Caption/coco_caption/pycocoevalcap/bleu/bleu_scorer.py b/spaces/YuAnthony/Audio-Caption/coco_caption/pycocoevalcap/bleu/bleu_scorer.py deleted file mode 100644 index d854d9bab00390073d7c4dc912252a34fb2b5a76..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Audio-Caption/coco_caption/pycocoevalcap/bleu/bleu_scorer.py +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/env python - -# bleu_scorer.py -# David Chiang - -# Copyright (c) 2004-2006 University of Maryland. All rights -# reserved. Do not redistribute without permission from the -# author. 
Not for commercial use. - -# Modified by: -# Hao Fang -# Tsung-Yi Lin - -# ================================================================= -# This code was pulled from https://github.com/tylin/coco-caption -# and refactored for Python 3. -# Image-specific names and comments have also been changed to be audio-specific -# ================================================================= - -'''Provides: -cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). -cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). -''' - -import copy -import sys, math, re -from collections import defaultdict - -def precook(s, n=4, out=False): - """Takes a string as input and returns an object that can be given to - either cook_refs or cook_test. This is optional: cook_refs and cook_test - can take string arguments as well.""" - words = s.split() - counts = defaultdict(int) - for k in range(1,n+1): - for i in range(len(words)-k+1): - ngram = tuple(words[i:i+k]) - counts[ngram] += 1 - return (len(words), counts) - -def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average" - '''Takes a list of reference sentences for a single segment - and returns an object that encapsulates everything that BLEU - needs to know about them.''' - - reflen = [] - maxcounts = {} - for ref in refs: - rl, counts = precook(ref, n) - reflen.append(rl) - for (ngram,count) in counts.items(): - maxcounts[ngram] = max(maxcounts.get(ngram,0), count) - - # Calculate effective reference sentence length. - if eff == "shortest": - reflen = min(reflen) - elif eff == "average": - reflen = float(sum(reflen))/len(reflen) - - ## lhuang: N.B.: leave reflen computaiton to the very end!! - - ## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design) - - return (reflen, maxcounts) - -def cook_test(test, reflen_refmaxcounts, eff=None, n=4): - '''Takes a test sentence and returns an object that - encapsulates everything that BLEU needs to know about it.''' - - testlen, counts = precook(test, n, True) - - result = {} - - reflen, refmaxcounts = reflen_refmaxcounts # Replaces the tuple unpacking - - # Calculate effective reference sentence length. - - if eff == "closest": - result["reflen"] = min((abs(l-testlen), l) for l in reflen)[1] - else: ## i.e., "average" or "shortest" or None - result["reflen"] = reflen - - result["testlen"] = testlen - - result["guess"] = [max(0,testlen-k+1) for k in range(1,n+1)] - - result['correct'] = [0]*n - for (ngram, count) in counts.items(): - result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count) - - return result - -class BleuScorer(object): - """Bleu scorer. - """ - - __slots__ = "n", "crefs", "ctest", "_score", "_ratio", "_testlen", "_reflen", "special_reflen" - # special_reflen is used in oracle (proportional effective ref len for a node). 
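A usage sketch for the scorer defined in this class (an editor's illustration, not part of the original file): inputs are pre-tokenized, space-separated strings, and compute_score() (defined further below) returns the corpus-level BLEU-1..4 values together with per-order lists of per-sentence scores.

    scorer = BleuScorer(n=4)
    scorer += ("a cat sits on the mat",
               ["a cat is sitting on the mat", "there is a cat on the mat"])
    bleus, per_sentence = scorer.compute_score()   # bleus = [BLEU-1, BLEU-2, BLEU-3, BLEU-4]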
- - def copy(self): - ''' copy the refs.''' - new = BleuScorer(n=self.n) - new.ctest = copy.copy(self.ctest) - new.crefs = copy.copy(self.crefs) - new._score = None - return new - - def __init__(self, test=None, refs=None, n=4, special_reflen=None): - ''' singular instance ''' - - self.n = n - self.crefs = [] - self.ctest = [] - self.cook_append(test, refs) - self.special_reflen = special_reflen - - def cook_append(self, test, refs): - '''called by constructor and __iadd__ to avoid creating new instances.''' - - if refs is not None: - self.crefs.append(cook_refs(refs)) - if test is not None: - cooked_test = cook_test(test, self.crefs[-1]) - self.ctest.append(cooked_test) ## N.B.: -1 - else: - self.ctest.append(None) # lens of crefs and ctest have to match - - self._score = None ## need to recompute - - def ratio(self, option=None): - self.compute_score(option=option) - return self._ratio - - def score_ratio(self, option=None): - '''return (bleu, len_ratio) pair''' - return self.fscore(option=option), self.ratio(option=option) - - def score_ratio_str(self, option=None): - return "%.4f (%.2f)" % self.score_ratio(option) - - def reflen(self, option=None): - self.compute_score(option=option) - return self._reflen - - def testlen(self, option=None): - self.compute_score(option=option) - return self._testlen - - def retest(self, new_test): - if type(new_test) is str: - new_test = [new_test] - assert len(new_test) == len(self.crefs), new_test - self.ctest = [] - for t, rs in zip(new_test, self.crefs): - self.ctest.append(cook_test(t, rs)) - self._score = None - - return self - - def rescore(self, new_test): - ''' replace test(s) with new test(s), and returns the new score.''' - - return self.retest(new_test).compute_score() - - def size(self): - assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest)) - return len(self.crefs) - - def __iadd__(self, other): - '''add an instance (e.g., from another sentence).''' - - if type(other) is tuple: - ## avoid creating new BleuScorer instances - self.cook_append(other[0], other[1]) - else: - assert self.compatible(other), "incompatible BLEUs." 
- self.ctest.extend(other.ctest) - self.crefs.extend(other.crefs) - self._score = None ## need to recompute - - return self - - def compatible(self, other): - return isinstance(other, BleuScorer) and self.n == other.n - - def single_reflen(self, option="average"): - return self._single_reflen(self.crefs[0][0], option) - - def _single_reflen(self, reflens, option=None, testlen=None): - - if option == "shortest": - reflen = min(reflens) - elif option == "average": - reflen = float(sum(reflens))/len(reflens) - elif option == "closest": - reflen = min((abs(l-testlen), l) for l in reflens)[1] - else: - assert False, "unsupported reflen option %s" % option - - return reflen - - def recompute_score(self, option=None, verbose=0): - self._score = None - return self.compute_score(option, verbose) - - def compute_score(self, option=None, verbose=0): - n = self.n - small = 1e-9 - tiny = 1e-15 ## so that if guess is 0 still return 0 - bleu_list = [[] for _ in range(n)] - - if self._score is not None: - return self._score - - if option is None: - option = "average" if len(self.crefs) == 1 else "closest" - - self._testlen = 0 - self._reflen = 0 - totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n} - - # for each sentence - for comps in self.ctest: - testlen = comps['testlen'] - self._testlen += testlen - - if self.special_reflen is None: ## need computation - reflen = self._single_reflen(comps['reflen'], option, testlen) - else: - reflen = self.special_reflen - - self._reflen += reflen - - for key in ['guess','correct']: - for k in range(n): - totalcomps[key][k] += comps[key][k] - - # append per audio bleu score - bleu = 1. - for k in range(n): - bleu *= (float(comps['correct'][k]) + tiny) \ - /(float(comps['guess'][k]) + small) - bleu_list[k].append(bleu ** (1./(k+1))) - ratio = (testlen + tiny) / (reflen + small) ## N.B.: avoid zero division - if ratio < 1: - for k in range(n): - bleu_list[k][-1] *= math.exp(1 - 1/ratio) - - if verbose > 1: - print(comps, reflen) - - totalcomps['reflen'] = self._reflen - totalcomps['testlen'] = self._testlen - - bleus = [] - bleu = 1. 
- for k in range(n): - bleu *= float(totalcomps['correct'][k] + tiny) \ - / (totalcomps['guess'][k] + small) - bleus.append(bleu ** (1./(k+1))) - ratio = (self._testlen + tiny) / (self._reflen + small) ## N.B.: avoid zero division - if ratio < 1: - for k in range(n): - bleus[k] *= math.exp(1 - 1/ratio) - - if verbose > 0: - print(totalcomps) - print("ratio:", ratio) - - self._score = bleus - return self._score, bleu_list diff --git a/spaces/Zaixi/ICLR_FLAG/utils/sascorer.py b/spaces/Zaixi/ICLR_FLAG/utils/sascorer.py deleted file mode 100644 index b1c694a1e1680ee3dd86dc0ef2ed8613360f8ef6..0000000000000000000000000000000000000000 --- a/spaces/Zaixi/ICLR_FLAG/utils/sascorer.py +++ /dev/null @@ -1,163 +0,0 @@ -from __future__ import print_function - -from rdkit import Chem -from rdkit.Chem import rdMolDescriptors -from rdkit.six.moves import cPickle -from rdkit.six import iteritems - -import math -from collections import defaultdict - -import os.path as op - -_fscores = None - - -def readFragmentScores(name='fpscores'): - import gzip - global _fscores - # generate the full path filename: - if name == "fpscores": - name = op.join(op.dirname(__file__), name) - _fscores = cPickle.load(gzip.open('%s.pkl.gz' % name)) - outDict = {} - for i in _fscores: - for j in range(1, len(i)): - outDict[i[j]] = float(i[0]) - _fscores = outDict - - -def numBridgeheadsAndSpiro(mol, ri=None): - nSpiro = rdMolDescriptors.CalcNumSpiroAtoms(mol) - nBridgehead = rdMolDescriptors.CalcNumBridgeheadAtoms(mol) - return nBridgehead, nSpiro - - -def calculateScore(m): - if _fscores is None: - readFragmentScores() - - # fragment score - fp = rdMolDescriptors.GetMorganFingerprint(m, - 2) #<- 2 is the *radius* of the circular fingerprint - fps = fp.GetNonzeroElements() - score1 = 0. - nf = 0 - for bitId, v in iteritems(fps): - nf += v - sfp = bitId - score1 += _fscores.get(sfp, -4) * v - score1 /= nf - - # features score - nAtoms = m.GetNumAtoms() - nChiralCenters = len(Chem.FindMolChiralCenters(m, includeUnassigned=True)) - ri = m.GetRingInfo() - nBridgeheads, nSpiro = numBridgeheadsAndSpiro(m, ri) - nMacrocycles = 0 - for x in ri.AtomRings(): - if len(x) > 8: - nMacrocycles += 1 - - sizePenalty = nAtoms**1.005 - nAtoms - stereoPenalty = math.log10(nChiralCenters + 1) - spiroPenalty = math.log10(nSpiro + 1) - bridgePenalty = math.log10(nBridgeheads + 1) - macrocyclePenalty = 0. - # --------------------------------------- - # This differs from the paper, which defines: - # macrocyclePenalty = math.log10(nMacrocycles+1) - # This form generates better results when 2 or more macrocycles are present - if nMacrocycles > 0: - macrocyclePenalty = math.log10(2) - - score2 = 0. - sizePenalty - stereoPenalty - spiroPenalty - bridgePenalty - macrocyclePenalty - - # correction for the fingerprint density - # not in the original publication, added in version 1.1 - # to make highly symmetrical molecules easier to synthetise - score3 = 0. - if nAtoms > len(fps): - score3 = math.log(float(nAtoms) / len(fps)) * .5 - - sascore = score1 + score2 + score3 - - # need to transform "raw" value into scale between 1 and 10 - min = -4.0 - max = 2.5 - sascore = 11. - (sascore - min + 1) / (max - min) * 9. - # smooth the 10-end - if sascore > 8.: - sascore = 8. + math.log(sascore + 1. - 9.) 
- if sascore > 10.: - sascore = 10.0 - elif sascore < 1.: - sascore = 1.0 - - return sascore - - -def processMols(mols): - print('smiles\tName\tsa_score') - for i, m in enumerate(mols): - if m is None: - continue - - s = calculateScore(m) - - smiles = Chem.MolToSmiles(m) - print(smiles + "\t" + m.GetProp('_Name') + "\t%3f" % s) - - -if __name__ == '__main__': - import sys, time - - t1 = time.time() - readFragmentScores("fpscores") - t2 = time.time() - - suppl = Chem.SmilesMolSupplier(sys.argv[1]) - t3 = time.time() - processMols(suppl) - t4 = time.time() - - print('Reading took %.2f seconds. Calculating took %.2f seconds' % ((t2 - t1), (t4 - t3)), - file=sys.stderr) - -# -# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Novartis Institutes for BioMedical Research Inc. -# nor the names of its contributors may be used to endorse or promote -# products derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
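A minimal usage sketch (an editor's illustration, not part of the deleted file): the scorer above expects the fpscores.pkl.gz fragment-score table to sit next to this module, and it assumes an RDKit build that still ships rdkit.six, as the imports above require. It rates an RDKit molecule on a 1-10 scale, where lower means easier to synthesize.

    from rdkit import Chem
    m = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')   # aspirin
    readFragmentScores('fpscores')                    # loads fpscores.pkl.gz once
    print(round(calculateScore(m), 2))                # SA score, 1 (easy) .. 10 (hard)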
-# - -def compute_sa_score(rdmol): - rdmol = Chem.MolFromSmiles(Chem.MolToSmiles(rdmol)) - sa = calculateScore(rdmol) - sa = round((10-sa)/9,2) - return sa - \ No newline at end of file diff --git a/spaces/Zengyf-CVer/Streamlit_YOLOv5_Model2x/utils/plots.py b/spaces/Zengyf-CVer/Streamlit_YOLOv5_Model2x/utils/plots.py deleted file mode 100644 index 0f322b6b5844860c2304472f3b1676ad7194d1b6..0000000000000000000000000000000000000000 --- a/spaces/Zengyf-CVer/Streamlit_YOLOv5_Model2x/utils/plots.py +++ /dev/null @@ -1,519 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Plotting utils -""" - -import contextlib -import math -import os -from copy import copy -from pathlib import Path -from urllib.error import URLError - -import cv2 -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sn -import torch -from PIL import Image, ImageDraw, ImageFont - -from utils import TryExcept, threaded -from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, - is_ascii, xywh2xyxy, xyxy2xywh) -from utils.metrics import fitness - -# Settings -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only - - -class Colors: - # Ultralytics color palette https://ultralytics.com/ - def __init__(self): - # hex = matplotlib.colors.TABLEAU_COLORS.values() - hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb(f'#{c}') for c in hexs] - self.n = len(self.palette) - - def __call__(self, i, bgr=False): - c = self.palette[int(i) % self.n] - return (c[2], c[1], c[0]) if bgr else c - - @staticmethod - def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - -colors = Colors() # create instance for 'from utils.plots import colors' - - -def check_pil_font(font=FONT, size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - font = Path(font) - font = font if font.exists() else (CONFIG_DIR / font.name) - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception: # download if missing - try: - check_font(font) - return ImageFont.truetype(str(font), size) - except TypeError: - check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 - except URLError: # not online - return ImageFont.load_default() - - -class Annotator: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - if label: - w, h = self.font.getsize(label) # text width, height - outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), - fill=color, - ) - # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h >= 3 - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): - # Add text to image (PIL-only) - if anchor == 'bottom': # start y from font bottom - w, h = self.font.getsize(text) # text width, height - xy[1] += 1 - h - self.draw.text(xy, text, fill=txt_color, font=self.font) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - - -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - - LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.title('Features') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - targets = [] - for i, o in enumerate(output): - targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy()) - return np.array(targets) - - -@threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - ti = targets[targets[:, 0] == i] # image targets - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or 
conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) - annotator.im.save(fname) # save - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - plt.close() - - -def plot_val_txt(): # from utils.plots import *; plot_val() - # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() - # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) - save_dir = Path(file).parent if file else Path(dir) - plot2 = False # plot additional results - if plot2: - ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): - y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], - y[3, 1:j] * 1E2, - '.-', - linewidth=2, - markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', - linewidth=2, - markersize=8, - alpha=.25, - label='EfficientDet') - - ax2.grid(alpha=0.2) - ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 57) - ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') - plt.savefig(f, dpi=300) - - -@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 -def plot_labels(labels, names=(), save_dir=Path('')): - # plot dataset labels - LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - - # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) - plt.close() - - # matplotlib labels - matplotlib.use('svg') # faster - ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - with contextlib.suppress(Exception): # color histogram bars by class - [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - ax[0].set_ylabel('instances') - if 0 < len(names) < 30: - ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(names, rotation=90, fontsize=10) - else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) - - # rectangles - labels[:, 1:3] = 0.5 # center - labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 - img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) - for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot - ax[1].imshow(img) - ax[1].axis('off') - - for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: - ax[a].spines[s].set_visible(False) - - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') - plt.close() - - -def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): - # Show classification image grid with labels (optional) and predictions (optional) - from utils.augmentations import denormalize - - names = names or [f'class{i}' for i in range(1000)] - blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), - dim=0) # select batch index 0, block by channels - n = min(len(blocks), nmax) # number of plots - m = min(8, round(n ** 0.5)) # 8 x 8 default - fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols - ax = ax.ravel() if m > 1 else [ax] - # plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) - ax[i].axis('off') - if labels is not None: - s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') - ax[i].set_title(s, fontsize=8, verticalalignment='top') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - if verbose: - LOGGER.info(f"Saving {f}") - if labels is not None: - LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) - if pred is not None: - LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) - return f - - -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() - # Plot evolve.csv hyp evolution results - evolve_csv = Path(evolve_csv) - data = pd.read_csv(evolve_csv) - keys = [x.strip() for x in data.columns] - x = data.values - f = fitness(x) - j = np.argmax(f) # max fitness index - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - print(f'Best results from row {j} of {evolve_csv}:') - for i, k in enumerate(keys[7:]): - v = x[:, 7 + i] - mu = v[j] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', 
markersize=15) - plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print(f'{k:>15}: {mu:.3g}') - f = evolve_csv.with_suffix('.png') # filename - plt.savefig(f, dpi=200) - plt.close() - print(f'Saved {f}') - - -def plot_results(file='path/to/results.csv', dir=''): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' - for f in files: - try: - data = pd.read_csv(f) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): - y = data.values[:, j].astype('float') - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) - ax[i].set_title(s[j], fontsize=12) - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - LOGGER.info(f'Warning: Plotting error for {f}: {e}') - ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) - plt.close() - - -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
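# Illustrative aside (not part of the deleted file): save_one_box below converts the
# box to (cx, cy, w, h), scales width/height by `gain`, adds `pad` pixels, and converts
# back before cropping. A minimal NumPy sketch of that expansion for a single
# [x1, y1, x2, y2] box; `expand_box` is a hypothetical helper written only for illustration.
import numpy as np

def expand_box(xyxy, gain=1.02, pad=10):
    x1, y1, x2, y2 = xyxy
    cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0   # box centre
    w = (x2 - x1) * gain + pad                   # width  * gain + pad
    h = (y2 - y1) * gain + pad                   # height * gain + pad
    return np.array([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])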
Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - file.parent.mkdir(parents=True, exist_ok=True) # make directory - f = str(increment_path(file).with_suffix('.jpg')) - # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue - Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB - return crop diff --git a/spaces/aadnk/faster-whisper-webui/src/whisper/fasterWhisperContainer.py b/spaces/aadnk/faster-whisper-webui/src/whisper/fasterWhisperContainer.py deleted file mode 100644 index 5bd640eeba90f7ad2c6a2795ed14e40d30e90c4c..0000000000000000000000000000000000000000 --- a/spaces/aadnk/faster-whisper-webui/src/whisper/fasterWhisperContainer.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -from typing import List, Union - -from faster_whisper import WhisperModel, download_model -from src.config import ModelConfig, VadInitialPromptMode -from src.hooks.progressListener import ProgressListener -from src.languages import get_language_from_name -from src.modelCache import ModelCache -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback, AbstractWhisperContainer -from src.utils import format_timestamp - -class FasterWhisperContainer(AbstractWhisperContainer): - def __init__(self, model_name: str, device: str = None, compute_type: str = "float16", - download_root: str = None, - cache: ModelCache = None, models: List[ModelConfig] = []): - super().__init__(model_name, device, compute_type, download_root, cache, models) - - def ensure_downloaded(self): - """ - Ensure that the model is downloaded. This is useful if you want to ensure that the model is downloaded before - passing the container to a subprocess. - """ - model_config = self._get_model_config() - - if os.path.isdir(model_config.url): - model_config.path = model_config.url - else: - model_config.path = download_model(model_config.url, output_dir=self.download_root) - - def _get_model_config(self) -> ModelConfig: - """ - Get the model configuration for the model. - """ - for model in self.models: - if model.name == self.model_name: - return model - return None - - def _create_model(self): - print("Loading faster whisper model " + self.model_name + " for device " + str(self.device)) - model_config = self._get_model_config() - model_url = model_config.url - - if model_config.type == "whisper": - if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]: - raise Exception("FasterWhisperContainer does not yet support Whisper models. 
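# Illustrative aside (not part of the deleted file): a minimal sketch of how the
# container below ends up constructing the model: "large" is first mapped to
# "large-v1", an unset device falls back to "auto", and precision is controlled by
# compute_type rather than fp16. Assumes faster-whisper is installed; the helper
# name is hypothetical.
from faster_whisper import WhisperModel

def load_faster_whisper(model_name_or_path, device=None, compute_type="float16"):
    if model_name_or_path == "large":            # alias for the first large checkpoint
        model_name_or_path = "large-v1"
    return WhisperModel(model_name_or_path, device=device or "auto", compute_type=compute_type)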
Use ct2-transformers-converter to convert the model to a faster-whisper model.") - if model_url == "large": - # large is an alias for large-v1 - model_url = "large-v1" - - device = self.device - - if (device is None): - device = "auto" - - model = WhisperModel(model_url, device=device, compute_type=self.compute_type) - return model - - def create_callback(self, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict) -> AbstractWhisperCallback: - """ - Create a WhisperCallback object that can be used to transcript audio files. - - Parameters - ---------- - language: str - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - prompt_strategy: AbstractPromptStrategy - The prompt strategy to use. If not specified, the prompt from Whisper will be used. - decodeOptions: dict - Additional options to pass to the decoder. Must be pickleable. - - Returns - ------- - A WhisperCallback object. - """ - return FasterWhisperCallback(self, language=language, task=task, prompt_strategy=prompt_strategy, **decodeOptions) - -class FasterWhisperCallback(AbstractWhisperCallback): - def __init__(self, model_container: FasterWhisperContainer, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict): - self.model_container = model_container - self.language = language - self.task = task - self.prompt_strategy = prompt_strategy - self.decodeOptions = decodeOptions - - self._printed_warning = False - - def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None): - """ - Peform the transcription of the given audio file or data. - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor] - The audio file to transcribe, or the audio data as a numpy array or torch tensor. - segment_index: int - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - progress_listener: ProgressListener - A callback to receive progress updates. 
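# Illustrative aside (not part of the deleted file): FasterWhisperCallback.invoke,
# further down in this hunk, renames a few openai-whisper decode options to their
# faster-whisper equivalents before calling model.transcribe. A minimal sketch of
# that translation; the helper name is hypothetical and it covers only a subset of
# the options the original touches.
def translate_decode_options(options):
    options = dict(options)
    options.pop("fp16", None)                                  # ignored; compute_type controls precision
    if "logprob_threshold" in options:
        options["log_prob_threshold"] = options.pop("logprob_threshold")
    suppress = options.get("suppress_tokens")
    if isinstance(suppress, str):                              # "-1,0" -> [-1, 0]
        options["suppress_tokens"] = [int(t) for t in suppress.split(",")]
    return options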
- """ - model: WhisperModel = self.model_container.get_model() - language_code = self._lookup_language_code(self.language) if self.language else None - - # Copy decode options and remove options that are not supported by faster-whisper - decodeOptions = self.decodeOptions.copy() - verbose = decodeOptions.pop("verbose", None) - - logprob_threshold = decodeOptions.pop("logprob_threshold", None) - - patience = decodeOptions.pop("patience", None) - length_penalty = decodeOptions.pop("length_penalty", None) - suppress_tokens = decodeOptions.pop("suppress_tokens", None) - - if (decodeOptions.pop("fp16", None) is not None): - if not self._printed_warning: - print("WARNING: fp16 option is ignored by faster-whisper - use compute_type instead.") - self._printed_warning = True - - # Fix up decode options - if (logprob_threshold is not None): - decodeOptions["log_prob_threshold"] = logprob_threshold - - decodeOptions["patience"] = float(patience) if patience is not None else 1.0 - decodeOptions["length_penalty"] = float(length_penalty) if length_penalty is not None else 1.0 - - # See if supress_tokens is a string - if so, convert it to a list of ints - decodeOptions["suppress_tokens"] = self._split_suppress_tokens(suppress_tokens) - - initial_prompt = self.prompt_strategy.get_segment_prompt(segment_index, prompt, detected_language) \ - if self.prompt_strategy else prompt - - segments_generator, info = model.transcribe(audio, \ - language=language_code if language_code else detected_language, task=self.task, \ - initial_prompt=initial_prompt, \ - **decodeOptions - ) - - segments = [] - - for segment in segments_generator: - segments.append(segment) - - if progress_listener is not None: - progress_listener.on_progress(segment.end, info.duration) - if verbose: - print("[{}->{}] {}".format(format_timestamp(segment.start, True), format_timestamp(segment.end, True), - segment.text)) - - text = " ".join([segment.text for segment in segments]) - - # Convert the segments to a format that is easier to serialize - whisper_segments = [{ - "text": segment.text, - "start": segment.start, - "end": segment.end, - - # Extra fields added by faster-whisper - "words": [{ - "start": word.start, - "end": word.end, - "word": word.word, - "probability": word.probability - } for word in (segment.words if segment.words is not None else []) ] - } for segment in segments] - - result = { - "segments": whisper_segments, - "text": text, - "language": info.language if info else None, - - # Extra fields added by faster-whisper - "language_probability": info.language_probability if info else None, - "duration": info.duration if info else None - } - - # If we have a prompt strategy, we need to increment the current prompt - if self.prompt_strategy: - self.prompt_strategy.on_segment_finished(segment_index, prompt, detected_language, result) - - if progress_listener is not None: - progress_listener.on_finished() - return result - - def _split_suppress_tokens(self, suppress_tokens: Union[str, List[int]]): - if (suppress_tokens is None): - return None - if (isinstance(suppress_tokens, list)): - return suppress_tokens - - return [int(token) for token in suppress_tokens.split(",")] - - def _lookup_language_code(self, language: str): - language = get_language_from_name(language) - - if language is None: - raise ValueError("Invalid language: " + language) - - return language.code diff --git a/spaces/aadnk/whisper-webui/src/whisper/abstractWhisperContainer.py b/spaces/aadnk/whisper-webui/src/whisper/abstractWhisperContainer.py deleted file mode 
100644 index 98cae0679185e2142f3cd3c7bdf35ab67640d5b2..0000000000000000000000000000000000000000 --- a/spaces/aadnk/whisper-webui/src/whisper/abstractWhisperContainer.py +++ /dev/null @@ -1,115 +0,0 @@ -import abc -from typing import Any, Callable, List - -from src.config import ModelConfig, VadInitialPromptMode - -from src.hooks.progressListener import ProgressListener -from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - -class AbstractWhisperCallback: - def __init__(self): - pass - - @abc.abstractmethod - def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None): - """ - Peform the transcription of the given audio file or data. - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor] - The audio file to transcribe, or the audio data as a numpy array or torch tensor. - segment_index: int - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - progress_listener: ProgressListener - A callback to receive progress updates. - """ - raise NotImplementedError() - -class LambdaWhisperCallback(AbstractWhisperCallback): - def __init__(self, callback_lambda: Callable[[Any, int, str, str, ProgressListener], None]): - super().__init__() - self.callback_lambda = callback_lambda - - def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None): - return self.callback_lambda(audio, segment_index, prompt, detected_language, progress_listener) - -class AbstractWhisperContainer: - def __init__(self, model_name: str, device: str = None, compute_type: str = "float16", - download_root: str = None, - cache: ModelCache = None, models: List[ModelConfig] = []): - self.model_name = model_name - self.device = device - self.compute_type = compute_type - self.download_root = download_root - self.cache = cache - - # Will be created on demand - self.model = None - - # List of known models - self.models = models - - def get_model(self): - if self.model is None: - - if (self.cache is None): - self.model = self._create_model() - else: - model_key = "WhisperContainer." + self.model_name + ":" + (self.device if self.device else '') - self.model = self.cache.get(model_key, self._create_model) - return self.model - - @abc.abstractmethod - def _create_model(self): - raise NotImplementedError() - - def ensure_downloaded(self): - pass - - @abc.abstractmethod - def create_callback(self, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict) -> AbstractWhisperCallback: - """ - Create a WhisperCallback object that can be used to transcript audio files. - - Parameters - ---------- - language: str - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - prompt_strategy: AbstractPromptStrategy - The prompt strategy to use for the transcription. - decodeOptions: dict - Additional options to pass to the decoder. Must be pickleable. - - Returns - ------- - A WhisperCallback object. 
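# Illustrative aside (not part of the deleted file): get_model above builds the model
# lazily and, when a cache is supplied, stores it under a key derived from the model
# name and device so later calls reuse the same instance. A minimal dict-backed sketch
# of that cache contract; the real ModelCache implementation may differ.
class SimpleModelCache:
    def __init__(self):
        self._items = {}

    def get(self, key, factory):
        if key not in self._items:
            self._items[key] = factory()   # construct once, then reuse
        return self._items[key]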
- """ - raise NotImplementedError() - - # This is required for multiprocessing - def __getstate__(self): - return { - "model_name": self.model_name, - "device": self.device, - "download_root": self.download_root, - "models": self.models, - "compute_type": self.compute_type - } - - def __setstate__(self, state): - self.model_name = state["model_name"] - self.device = state["device"] - self.download_root = state["download_root"] - self.models = state["models"] - self.compute_type = state["compute_type"] - self.model = None - # Depickled objects must use the global cache - self.cache = GLOBAL_MODEL_CACHE \ No newline at end of file diff --git a/spaces/abhishek/first-order-motion-model/modules/discriminator.py b/spaces/abhishek/first-order-motion-model/modules/discriminator.py deleted file mode 100644 index 8356493e49cb9d5723f52d2ac24f8b5d244d3a0f..0000000000000000000000000000000000000000 --- a/spaces/abhishek/first-order-motion-model/modules/discriminator.py +++ /dev/null @@ -1,95 +0,0 @@ -from torch import nn -import torch.nn.functional as F -from modules.util import kp2gaussian -import torch - - -class DownBlock2d(nn.Module): - """ - Simple block for processing video (encoder). - """ - - def __init__(self, in_features, out_features, norm=False, kernel_size=4, pool=False, sn=False): - super(DownBlock2d, self).__init__() - self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size) - - if sn: - self.conv = nn.utils.spectral_norm(self.conv) - - if norm: - self.norm = nn.InstanceNorm2d(out_features, affine=True) - else: - self.norm = None - self.pool = pool - - def forward(self, x): - out = x - out = self.conv(out) - if self.norm: - out = self.norm(out) - out = F.leaky_relu(out, 0.2) - if self.pool: - out = F.avg_pool2d(out, (2, 2)) - return out - - -class Discriminator(nn.Module): - """ - Discriminator similar to Pix2Pix - """ - - def __init__(self, num_channels=3, block_expansion=64, num_blocks=4, max_features=512, - sn=False, use_kp=False, num_kp=10, kp_variance=0.01, **kwargs): - super(Discriminator, self).__init__() - - down_blocks = [] - for i in range(num_blocks): - down_blocks.append( - DownBlock2d(num_channels + num_kp * use_kp if i == 0 else min(max_features, block_expansion * (2 ** i)), - min(max_features, block_expansion * (2 ** (i + 1))), - norm=(i != 0), kernel_size=4, pool=(i != num_blocks - 1), sn=sn)) - - self.down_blocks = nn.ModuleList(down_blocks) - self.conv = nn.Conv2d(self.down_blocks[-1].conv.out_channels, out_channels=1, kernel_size=1) - if sn: - self.conv = nn.utils.spectral_norm(self.conv) - self.use_kp = use_kp - self.kp_variance = kp_variance - - def forward(self, x, kp=None): - feature_maps = [] - out = x - if self.use_kp: - heatmap = kp2gaussian(kp, x.shape[2:], self.kp_variance) - out = torch.cat([out, heatmap], dim=1) - - for down_block in self.down_blocks: - feature_maps.append(down_block(out)) - out = feature_maps[-1] - prediction_map = self.conv(out) - - return feature_maps, prediction_map - - -class MultiScaleDiscriminator(nn.Module): - """ - Multi-scale (scale) discriminator - """ - - def __init__(self, scales=(), **kwargs): - super(MultiScaleDiscriminator, self).__init__() - self.scales = scales - discs = {} - for scale in scales: - discs[str(scale).replace('.', '-')] = Discriminator(**kwargs) - self.discs = nn.ModuleDict(discs) - - def forward(self, x, kp=None): - out_dict = {} - for scale, disc in self.discs.items(): - scale = str(scale).replace('-', '.') - key = 'prediction_' + scale - feature_maps, prediction_map 
= disc(x[key], kp) - out_dict['feature_maps_' + scale] = feature_maps - out_dict['prediction_map_' + scale] = prediction_map - return out_dict diff --git a/spaces/abhishek/sketch-to-image/annotator/midas/midas/blocks.py b/spaces/abhishek/sketch-to-image/annotator/midas/midas/blocks.py deleted file mode 100644 index 62d50a2fde0a44b94271d4329c3934d1d3f2ba1a..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/midas/midas/blocks.py +++ /dev/null @@ -1,352 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala -''' - -import torch -import torch.nn as nn - -from .vit import ( - _make_pretrained_vitb_rn50_384, - _make_pretrained_vitl16_384, - _make_pretrained_vitb16_384, - forward_vit, -) - -def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",): - if backbone == "vitl16_384": - pretrained = _make_pretrained_vitl16_384( - use_pretrained, hooks=hooks, use_readout=use_readout - ) - scratch = _make_scratch( - [256, 512, 1024, 1024], features, groups=groups, expand=expand - ) # ViT-L/16 - 85.0% Top1 (backbone) - elif backbone == "vitb_rn50_384": - pretrained = _make_pretrained_vitb_rn50_384( - use_pretrained, - hooks=hooks, - use_vit_only=use_vit_only, - use_readout=use_readout, - ) - scratch = _make_scratch( - [256, 512, 768, 768], features, groups=groups, expand=expand - ) # ViT-H/16 - 85.0% Top1 (backbone) - elif backbone == "vitb16_384": - pretrained = _make_pretrained_vitb16_384( - use_pretrained, hooks=hooks, use_readout=use_readout - ) - scratch = _make_scratch( - [96, 192, 384, 768], features, groups=groups, expand=expand - ) # ViT-B/16 - 84.6% Top1 (backbone) - elif backbone == "resnext101_wsl": - pretrained = _make_pretrained_resnext101_wsl(use_pretrained) - scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3 - elif backbone == "efficientnet_lite3": - pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) - scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3 - else: - print(f"Backbone '{backbone}' not implemented") - assert False - - return pretrained, scratch - - -def _make_scratch(in_shape, out_shape, groups=1, expand=False): - scratch = nn.Module() - - out_shape1 = out_shape - out_shape2 = out_shape - out_shape3 = out_shape - out_shape4 = out_shape - if expand==True: - out_shape1 = out_shape - out_shape2 = out_shape*2 - out_shape3 = out_shape*4 - out_shape4 = out_shape*8 - - scratch.layer1_rn = nn.Conv2d( - in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - scratch.layer2_rn = nn.Conv2d( - in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - scratch.layer3_rn = nn.Conv2d( - in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - scratch.layer4_rn = nn.Conv2d( - in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - - return scratch - - -def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): - efficientnet = torch.hub.load( - 
"rwightman/gen-efficientnet-pytorch", - "tf_efficientnet_lite3", - pretrained=use_pretrained, - exportable=exportable - ) - return _make_efficientnet_backbone(efficientnet) - - -def _make_efficientnet_backbone(effnet): - pretrained = nn.Module() - - pretrained.layer1 = nn.Sequential( - effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2] - ) - pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) - pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) - pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) - - return pretrained - - -def _make_resnet_backbone(resnet): - pretrained = nn.Module() - pretrained.layer1 = nn.Sequential( - resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 - ) - - pretrained.layer2 = resnet.layer2 - pretrained.layer3 = resnet.layer3 - pretrained.layer4 = resnet.layer4 - - return pretrained - - -def _make_pretrained_resnext101_wsl(use_pretrained): - resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl") - return _make_resnet_backbone(resnet) - - - -class Interpolate(nn.Module): - """Interpolation module. - """ - - def __init__(self, scale_factor, mode, align_corners=False): - """Init. - - Args: - scale_factor (float): scaling - mode (str): interpolation mode - """ - super(Interpolate, self).__init__() - - self.interp = nn.functional.interpolate - self.scale_factor = scale_factor - self.mode = mode - self.align_corners = align_corners - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: interpolated data - """ - - x = self.interp( - x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners - ) - - return x - - -class ResidualConvUnit(nn.Module): - """Residual convolution module. - """ - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super().__init__() - - self.conv1 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True - ) - - self.conv2 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True - ) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - out = self.relu(x) - out = self.conv1(out) - out = self.relu(out) - out = self.conv2(out) - - return out + x - - -class FeatureFusionBlock(nn.Module): - """Feature fusion block. - """ - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super(FeatureFusionBlock, self).__init__() - - self.resConfUnit1 = ResidualConvUnit(features) - self.resConfUnit2 = ResidualConvUnit(features) - - def forward(self, *xs): - """Forward pass. - - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - output += self.resConfUnit1(xs[1]) - - output = self.resConfUnit2(output) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=True - ) - - return output - - - - -class ResidualConvUnit_custom(nn.Module): - """Residual convolution module. - """ - - def __init__(self, features, activation, bn): - """Init. 
- - Args: - features (int): number of features - """ - super().__init__() - - self.bn = bn - - self.groups=1 - - self.conv1 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups - ) - - self.conv2 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups - ) - - if self.bn==True: - self.bn1 = nn.BatchNorm2d(features) - self.bn2 = nn.BatchNorm2d(features) - - self.activation = activation - - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - - out = self.activation(x) - out = self.conv1(out) - if self.bn==True: - out = self.bn1(out) - - out = self.activation(out) - out = self.conv2(out) - if self.bn==True: - out = self.bn2(out) - - if self.groups > 1: - out = self.conv_merge(out) - - return self.skip_add.add(out, x) - - # return out + x - - -class FeatureFusionBlock_custom(nn.Module): - """Feature fusion block. - """ - - def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True): - """Init. - - Args: - features (int): number of features - """ - super(FeatureFusionBlock_custom, self).__init__() - - self.deconv = deconv - self.align_corners = align_corners - - self.groups=1 - - self.expand = expand - out_features = features - if self.expand==True: - out_features = features//2 - - self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) - - self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) - self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) - - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, *xs): - """Forward pass. 
- - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - res = self.resConfUnit1(xs[1]) - output = self.skip_add.add(output, res) - # output += res - - output = self.resConfUnit2(output) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=self.align_corners - ) - - output = self.out_conv(output) - - return output - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/models/ocrnet_hr18.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/models/ocrnet_hr18.py deleted file mode 100644 index c60f62a7cdf3f5c5096a7a7e725e8268fddcb057..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/models/ocrnet_hr18.py +++ /dev/null @@ -1,68 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='CascadeEncoderDecoder', - num_stages=2, - pretrained='open-mmlab://msra/hrnetv2_w18', - backbone=dict( - type='HRNet', - norm_cfg=norm_cfg, - norm_eval=False, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - ], - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/logger/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/logger/__init__.py deleted file mode 100644 index a0b6b345640a895368ac8a647afef6f24333d90e..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/logger/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
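# Illustrative aside (not part of the deleted files): in the ocrnet_hr18 config above,
# the HRNet branch widths double at every stage (18, 36, 72, 144) and the first decode
# head concatenates all four resolutions, so it receives sum([18, 36, 72, 144]) = 270
# input channels. A one-line sanity check:
widths = [18 * 2 ** i for i in range(4)]
assert widths == [18, 36, 72, 144] and sum(widths) == 270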
-from .base import LoggerHook -from .dvclive import DvcliveLoggerHook -from .mlflow import MlflowLoggerHook -from .neptune import NeptuneLoggerHook -from .pavi import PaviLoggerHook -from .tensorboard import TensorboardLoggerHook -from .text import TextLoggerHook -from .wandb import WandbLoggerHook - -__all__ = [ - 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook', - 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook', - 'NeptuneLoggerHook', 'DvcliveLoggerHook' -] diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/node.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/node.py deleted file mode 100644 index 1f37f7856cc732a37dc58253022a7c331489493e..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/node.py +++ /dev/null @@ -1,263 +0,0 @@ -"""Nodes, conforming to the glTF 2.0 standards as specified in -https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-node - -Author: Matthew Matl -""" -import numpy as np - -import trimesh.transformations as transformations - -from .camera import Camera -from .mesh import Mesh -from .light import Light - - -class Node(object): - """A node in the node hierarchy. - - Parameters - ---------- - name : str, optional - The user-defined name of this object. - camera : :class:`Camera`, optional - The camera in this node. - children : list of :class:`Node` - The children of this node. - skin : int, optional - The index of the skin referenced by this node. - matrix : (4,4) float, optional - A floating-point 4x4 transformation matrix. - mesh : :class:`Mesh`, optional - The mesh in this node. - rotation : (4,) float, optional - The node's unit quaternion in the order (x, y, z, w), where - w is the scalar. - scale : (3,) float, optional - The node's non-uniform scale, given as the scaling factors along the x, - y, and z axes. - translation : (3,) float, optional - The node's translation along the x, y, and z axes. - weights : (n,) float - The weights of the instantiated Morph Target. Number of elements must - match number of Morph Targets of used mesh. - light : :class:`Light`, optional - The light in this node. - """ - - def __init__(self, - name=None, - camera=None, - children=None, - skin=None, - matrix=None, - mesh=None, - rotation=None, - scale=None, - translation=None, - weights=None, - light=None): - # Set defaults - if children is None: - children = [] - - self._matrix = None - self._scale = None - self._rotation = None - self._translation = None - if matrix is None: - if rotation is None: - rotation = np.array([0.0, 0.0, 0.0, 1.0]) - if translation is None: - translation = np.zeros(3) - if scale is None: - scale = np.ones(3) - self.rotation = rotation - self.translation = translation - self.scale = scale - else: - self.matrix = matrix - - self.name = name - self.camera = camera - self.children = children - self.skin = skin - self.mesh = mesh - self.weights = weights - self.light = light - - @property - def name(self): - """str : The user-defined name of this object. - """ - return self._name - - @name.setter - def name(self, value): - if value is not None: - value = str(value) - self._name = value - - @property - def camera(self): - """:class:`Camera` : The camera in this node. 
- """ - return self._camera - - @camera.setter - def camera(self, value): - if value is not None and not isinstance(value, Camera): - raise TypeError('Value must be a camera') - self._camera = value - - @property - def children(self): - """list of :class:`Node` : The children of this node. - """ - return self._children - - @children.setter - def children(self, value): - self._children = value - - @property - def skin(self): - """int : The skin index for this node. - """ - return self._skin - - @skin.setter - def skin(self, value): - self._skin = value - - @property - def mesh(self): - """:class:`Mesh` : The mesh in this node. - """ - return self._mesh - - @mesh.setter - def mesh(self, value): - if value is not None and not isinstance(value, Mesh): - raise TypeError('Value must be a mesh') - self._mesh = value - - @property - def light(self): - """:class:`Light` : The light in this node. - """ - return self._light - - @light.setter - def light(self, value): - if value is not None and not isinstance(value, Light): - raise TypeError('Value must be a light') - self._light = value - - @property - def rotation(self): - """(4,) float : The xyzw quaternion for this node. - """ - return self._rotation - - @rotation.setter - def rotation(self, value): - value = np.asanyarray(value) - if value.shape != (4,): - raise ValueError('Quaternion must be a (4,) vector') - if np.abs(np.linalg.norm(value) - 1.0) > 1e-3: - raise ValueError('Quaternion must have norm == 1.0') - self._rotation = value - self._matrix = None - - @property - def translation(self): - """(3,) float : The translation for this node. - """ - return self._translation - - @translation.setter - def translation(self, value): - value = np.asanyarray(value) - if value.shape != (3,): - raise ValueError('Translation must be a (3,) vector') - self._translation = value - self._matrix = None - - @property - def scale(self): - """(3,) float : The scale for this node. - """ - return self._scale - - @scale.setter - def scale(self, value): - value = np.asanyarray(value) - if value.shape != (3,): - raise ValueError('Scale must be a (3,) vector') - self._scale = value - self._matrix = None - - @property - def matrix(self): - """(4,4) float : The homogenous transform matrix for this node. - - Note that this matrix's elements are not settable, - it's just a copy of the internal matrix. You can set the whole - matrix, but not an individual element. 
- """ - if self._matrix is None: - self._matrix = self._m_from_tqs( - self.translation, self.rotation, self.scale - ) - return self._matrix.copy() - - @matrix.setter - def matrix(self, value): - value = np.asanyarray(value) - if value.shape != (4,4): - raise ValueError('Matrix must be a 4x4 numpy ndarray') - if not np.allclose(value[3,:], np.array([0.0, 0.0, 0.0, 1.0])): - raise ValueError('Bottom row of matrix must be [0,0,0,1]') - self.rotation = Node._q_from_m(value) - self.scale = Node._s_from_m(value) - self.translation = Node._t_from_m(value) - self._matrix = value - - @staticmethod - def _t_from_m(m): - return m[:3,3] - - @staticmethod - def _r_from_m(m): - U = m[:3,:3] - norms = np.linalg.norm(U.T, axis=1) - return U / norms - - @staticmethod - def _q_from_m(m): - M = np.eye(4) - M[:3,:3] = Node._r_from_m(m) - q_wxyz = transformations.quaternion_from_matrix(M) - return np.roll(q_wxyz, -1) - - @staticmethod - def _s_from_m(m): - return np.linalg.norm(m[:3,:3].T, axis=1) - - @staticmethod - def _r_from_q(q): - q_wxyz = np.roll(q, 1) - return transformations.quaternion_matrix(q_wxyz)[:3,:3] - - @staticmethod - def _m_from_tqs(t, q, s): - S = np.eye(4) - S[:3,:3] = np.diag(s) - - R = np.eye(4) - R[:3,:3] = Node._r_from_q(q) - - T = np.eye(4) - T[:3,3] = t - - return T.dot(R.dot(S)) diff --git a/spaces/aidealab/interior-ai/helpers.py b/spaces/aidealab/interior-ai/helpers.py deleted file mode 100644 index 8e00716f93d56f6d10ab44fb2c3f856bc38bf4fb..0000000000000000000000000000000000000000 --- a/spaces/aidealab/interior-ai/helpers.py +++ /dev/null @@ -1,47 +0,0 @@ -import gc -import torch -from scipy.signal import fftconvolve -from PIL import Image -import numpy as np - -def flush(): - gc.collect() - torch.cuda.empty_cache() - - - -def convolution(mask: Image.Image, size=9) -> Image: - """Method to blur the mask - Args: - mask (Image): masking image - size (int, optional): size of the blur. Defaults to 9. 
- Returns: - Image: blurred mask - """ - mask = np.array(mask.convert("L")) - conv = np.ones((size, size)) / size**2 - mask_blended = fftconvolve(mask, conv, 'same') - mask_blended = mask_blended.astype(np.uint8).copy() - - border = size - - # replace borders with original values - mask_blended[:border, :] = mask[:border, :] - mask_blended[-border:, :] = mask[-border:, :] - mask_blended[:, :border] = mask[:, :border] - mask_blended[:, -border:] = mask[:, -border:] - - return Image.fromarray(mask_blended).convert("L") - - -def postprocess_image_masking(inpainted: Image, image: Image, mask: Image) -> Image: - """Method to postprocess the inpainted image - Args: - inpainted (Image): inpainted image - image (Image): original image - mask (Image): mask - Returns: - Image: inpainted image - """ - final_inpainted = Image.composite(inpainted.convert("RGBA"), image.convert("RGBA"), mask) - return final_inpainted.convert("RGB") diff --git a/spaces/aiswaryasankar/entelligence.ai/app.py b/spaces/aiswaryasankar/entelligence.ai/app.py deleted file mode 100644 index 8fa46ab92c1f691c7cf99ab07effb8b979df00ba..0000000000000000000000000000000000000000 --- a/spaces/aiswaryasankar/entelligence.ai/app.py +++ /dev/null @@ -1,622 +0,0 @@ -import gradio as gr -import os -from queue import SimpleQueue -from langchain.callbacks.manager import CallbackManager -from langchain.chat_models import ChatOpenAI -from pydantic import BaseModel -import requests -import typing -from typing import TypeVar, Generic -import tqdm -from langchain.chains import ConversationalRetrievalChain -import os -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.vectorstores import DeepLake -import random -import time -import os -from langchain.document_loaders import TextLoader -from langchain.text_splitter import CharacterTextSplitter -import math -import subprocess - -from langchain.callbacks.base import BaseCallbackHandler -from langchain.schema import LLMResult -from typing import Any, Union - -job_done = object() - -class StreamingGradioCallbackHandler(BaseCallbackHandler): - def __init__(self, q: SimpleQueue): - self.q = q - - def on_llm_start( - self, serialized: typing.Dict[str, Any], prompts: typing.List[str], **kwargs: Any - ) -> None: - """Run when LLM starts running. Clean the queue.""" - while not self.q.empty(): - try: - self.q.get(block=False) - except Empty: - continue - - def on_llm_new_token(self, token: str, **kwargs: Any) -> None: - """Run on new LLM token. 
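# Illustrative aside (not part of the deleted file): the callback handler defined here
# pushes every generated token onto a SimpleQueue and a `job_done` sentinel once the
# LLM finishes. A minimal sketch of the consumer side a UI thread could run;
# `drain_tokens` is a hypothetical helper (the original app instead iterates over the
# chain's returned answer).
def drain_tokens(q, sentinel):
    text = ""
    while True:
        token = q.get()        # blocks until the callback produces something
        if token is sentinel:
            break
        text += token
        yield text             # growing string for incremental UI updates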
Only available when streaming is enabled.""" - self.q.put(token) - - def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: - """Run when LLM ends running.""" - self.q.put(job_done) - - def on_llm_error( - self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any - ) -> None: - """Run when LLM errors.""" - self.q.put(job_done) - - -class Response(BaseModel): - result: typing.Any - error: str - stdout: str - repo: str - -class HumanPrompt(BaseModel): - prompt: str - -class GithubResponse(BaseModel): - result: typing.Any - error: str - stdout: str - repo: str - - -repo_name = gr.State() -git_tickets = gr.State() -git_titles = gr.State() -git_ticket_choices = gr.State() -vector_db_url = gr.State() -git_tickets.value = [] -git_titles.value = [] -git_ticket_choices.value = [] - -embeddings = OpenAIEmbeddings(disallowed_special=()) - -def git_clone(repo_url): - subprocess.run(["git", "clone", repo_url]) - dirpath = repo_url.split('/')[-1] - if dirpath.lower().endswith('.git'): - dirpath = dirpath[:-4] - return dirpath - - -def index_repo(textbox: str, dropdown: str) -> Response: - - mapping = { - "Langchain" : "https://github.com/langchain-ai/langchain.git", - "Weaviate": "https://github.com/weaviate/weaviate.git", - "Llama2": "https://github.com/facebookresearch/llama.git", - "OpenAssistant": "https://github.com/LAION-AI/Open-Assistant.git", - "MemeAI": "https://github.com/aiswaryasankar/memeAI.git", - "GenerativeAgents": "https://github.com/joonspk-research/generative_agents.git" - } - - if textbox != "": - repo = textbox - else: - repo = mapping[dropdown[0]] - - repo_name.value = repo - pathName = git_clone(repo) - root_dir = './' + pathName - - activeloop_username = "aiswaryas" - dataset_path = f"hub://{activeloop_username}/" + pathName - invalid_dataset_path = False - - try: - try: - db = DeepLake(dataset_path=dataset_path, - embedding_function=embeddings, - token=os.environ['ACTIVELOOP_TOKEN'], - read_only=True, - num_workers=12, - runtime = {"tensor_db": True} - ) - except Exception as e: - print("Failed to read: " + str(e)) - if "scheduled for deletion" in str(e): - dataset_path = f"hub://{activeloop_username}/" + pathName + str(random.randint(1,100)) - invalid_dataset_path = True - - print(invalid_dataset_path) - print(db) - print(len(db.vectorstore.dataset)) - if invalid_dataset_path or db is None or len(db.vectorstore.dataset) == 0: - print("Dataset doesn't exist, fetching data") - try: - docs = [] - for dirpath, dirnames, filenames in os.walk(root_dir): - for file in filenames: - print(file) - try: - loader = TextLoader(os.path.join(dirpath, file), encoding='utf-8') - docs.extend(loader.load_and_split()) - except Exception as e: - print("Exception: " + str(e) + "| File: " + os.path.join(dirpath, file)) - pass - - activeloop_username = "aiswaryas" - text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) - texts = text_splitter.split_documents(docs) - - db = DeepLake(dataset_path=dataset_path, - embedding_function=embeddings, - token=os.environ['ACTIVELOOP_TOKEN'], - read_only=False, - num_workers=12, - runtime = {"tensor_db": True} - ) - # Do this in chunks to avoid hitting the ratelimit immediately - for i in range(0, len(texts), 500): - print("Adding documents " + str(i)) - db.add_documents(texts[i:i+500]) - time.sleep(.5) - - except Exception as e: - return Response( - result= "Failed to index github repo", - repo="", - error=str(e), - stdout="", - ) - - except Exception as e: - return Response( - result= "Failed to index github repo", - repo="", - 
error=str(e), - stdout="", - ) - - vector_db_url.value = dataset_path - - return { - success_response: "SUCCESS", - launch_product: gr.update(visible=True) - } - - -def answer_questions(question: str, github: str, **kwargs) -> Response: - - repoName = repo_name.value - github = repoName[:-4] - print("REPO NAME: " + github) - - try: - embeddings = OpenAIEmbeddings(disallowed_special=()) - pathName = github.split('/')[-1] - dataset_path = vector_db_url.value - - print("before reading repo") - db = DeepLake(dataset_path=dataset_path, read_only=True, embedding_function=embeddings) - - print("finished indexing repo") - retriever = db.as_retriever() - retriever.search_kwargs['distance_metric'] = 'cos' - retriever.search_kwargs['fetch_k'] = 100 - retriever.search_kwargs['maximal_marginal_relevance'] = True - retriever.search_kwargs['k'] = 20 - - q = SimpleQueue() - model = ChatOpenAI( - model_name='gpt-3.5-turbo-16k', - temperature=0.0, - verbose=True, - streaming=True, # Pass `streaming=True` to make sure the client receives the data. - callback_manager=CallbackManager( - [StreamingGradioCallbackHandler(q)] - ), - ) - qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000) - chat_history = [] - - except Exception as e: - print("Exception: " + str(e)) - return Response( - result="", - repo="", - error=str(e), - stdout="", - ) - - return Response( - result=qa({"question": question, "chat_history": chat_history}), - repo="", - error="", - stdout="", - ) - - -def fetchGithubIssues(**kwargs) -> Response: - """ - This endpoint should get a list of all the github issues that are open for this repository - """ - repo = "/".join(repo_name.value[:-4].split("/")[-2:]) - print("REPO NAME IN FETCH GITHUB ISSUES: " + str(repo)) - - batch = [] - all_issues = [] - per_page = 100 # Number of issues to return per page - num_pages = math.ceil(20 / per_page) - base_url = "https://api.github.com/repos" - - GITHUB_TOKEN = "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" # Copy your GitHub token here - headers = {"Authorization": f"token {GITHUB_TOKEN}"} - - issues_data = [] - - for page in range(num_pages): - # Query with state=all to get both open and closed issues - query = f"issues?page={page}&per_page={per_page}&state=all" - issues = requests.get(f"{base_url}/{repo}/{query}", headers=headers) - print(f"{base_url}/{repo}/{query}") - - batch.extend(issues.json()) - for issue in issues.json(): - issues_data.append({ - "issue_url": issue["url"], - "title": issue["title"], - "body": issue["body"], - "comments_url": issue["comments_url"], - }) - - # This should set the state variables for tickets - git_tickets.value = issues_data - git_ticket_choices.value = {ticket["title"]: ticket for ticket in issues_data} - git_titles.value = [ticket["title"] for ticket in issues_data] - return issues_data - - -def generateFolderNamesForRepo(repo): - """ - This endpoint will first take the repo structure and return the folder and subfolder names. - From those names, it will then prompt the model to generate an architecture diagram of that folder. - There will be three "modules" no input just output that take the autogenerated prompts based on the - input data and generate the responses that are displayed in the UI. 
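# Illustrative aside (not part of the deleted file): fetchGithubIssues above pages
# through the GitHub REST issues endpoint with state=all and keeps only the fields the
# UI needs. A minimal sketch of the same pattern; the token argument is a placeholder
# (the original hard-codes one, which belongs in an environment variable instead).
import requests

def list_issues(repo, token, per_page=100, pages=1):
    headers = {"Authorization": f"token {token}"}
    picked = []
    for page in range(1, pages + 1):
        url = f"https://api.github.com/repos/{repo}/issues?page={page}&per_page={per_page}&state=all"
        for issue in requests.get(url, headers=headers).json():
            picked.append({"title": issue["title"],
                           "body": issue["body"],
                           "issue_url": issue["url"],
                           "comments_url": issue["comments_url"]})
    return picked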
- """ - pathName = git_clone(repo) - root_dir = './' + pathName - - files, dirs, docs = [], [], [] - for dirpath, dirnames, filenames in os.walk(root_dir): - for file in filenames: - try: - loader = TextLoader(os.path.join(dirpath, file), encoding='utf-8') - docs.extend(loader.load_and_split()) - files.append(file) - dirs.append(dirnames) - except Exception as e: - print("Exception: " + str(e) + "| File: " + os.path.join(dirpath, file)) - pass - - return dirs - - -def generateDocumentationPerFolder(dir, github): - - if dir == "overview": - prompt= """ - Summarize the structure of the {} repository. Make a list of all endpoints and their behavior. Explain - how this module is used in the scope of the larger project. Format the response as code documentation with an - Overview, Architecture and Implementation Details. Within implementation details, list out each function and provide - an overview of that function. - """.format(github) - else: - prompt= """ - Summarize how {} is implemented in the {} repository. Make a list of all functions and their behavior. Explain - how this module is used in the scope of the larger project. Format the response as code documentation with an - Overview, Architecture and Implementation Details. Within implementation details, list out each function and provide - an overview of that function. - """.format(dir, github) - - try: - embeddings = OpenAIEmbeddings(disallowed_special=()) - pathName = github.split('/')[-1] - dataset_path = vector_db_url.value - - db = DeepLake(dataset_path=dataset_path, read_only=True, embedding_function=embeddings) - - retriever = db.as_retriever() - retriever.search_kwargs['distance_metric'] = 'cos' - retriever.search_kwargs['fetch_k'] = 100 - retriever.search_kwargs['maximal_marginal_relevance'] = True - retriever.search_kwargs['k'] = 20 - - # streaming_handler = kwargs.get('streaming_handler') - model = ChatOpenAI( - model_name='gpt-3.5-turbo-16k', - temperature=0.0, - verbose=True, - streaming=True, # Pass `streaming=True` to make sure the client receives the data. - ) - qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000) - chat_history = [] - return qa({"question": prompt, "chat_history": chat_history})["answer"] - - except Exception as e: - print (str(e)) - return "Failed to generate documentation" - - -def solveGithubIssue(ticket, history) -> Response: - """ - This endpoint takes in a github issue and then queries the db for the question against the codebase. - """ - repoName = repo_name.value - github = repoName[:-4] - - repoFolder = github.split("/")[-1] - body = git_ticket_choices.value[ticket]["body"] - title = git_ticket_choices.value[ticket]["title"] - question = """ - Given the code in the {} repo, propose a solution for this ticket {} that includes a - high level implementation, narrowing down the root cause of the issue and psuedocode if - applicable on how to resolve the issue. If multiple changes are required to address the - problem, list out each of the steps and a brief explanation for each one. - """.format(repoFolder, body) - - q_display = """ - Can you explain how to approach solving this ticket: {}. 
Here is a summary of the issue: {} - """.format(title, body) - - try: - embeddings = OpenAIEmbeddings(disallowed_special=()) - pathName = github.split('/')[-1] - dataset_path = vector_db_url.value - - db = DeepLake(dataset_path=dataset_path, read_only=True, embedding=embeddings) - - retriever = db.as_retriever() - retriever.search_kwargs['distance_metric'] = 'cos' - retriever.search_kwargs['fetch_k'] = 100 - retriever.search_kwargs['maximal_marginal_relevance'] = True - retriever.search_kwargs['k'] = 20 - - q = SimpleQueue() - model = ChatOpenAI( - model_name='gpt-3.5-turbo-16k', - temperature=0.0, - verbose=True, - streaming=True, # Pass `streaming=True` to make sure the client receives the data. - callback_manager=CallbackManager( - [StreamingGradioCallbackHandler(q)] - ), - ) - qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000) - - except Exception as e: - return [[str(e), None]] - - history = [[q_display, ""]] - history[-1][1] = "" - # Flatten the list of lists - flat_list = [item for sublist in history for item in sublist] - flat_list = [item for item in flat_list if item is not None] - - print(flat_list) - for char in qa({"question": question, "chat_history": []})["answer"]: - history[-1][1] += char - yield history - - -def user(message, history): - return "", history + [[message, None]] - - -def bot(history, **kwargs): - - user_message = history[-1][0] - - # global repoName - repoName = repo_name.value - print("STATE REPO NAME: " + repoName) - github = repoName[:-4] - try: - embeddings = OpenAIEmbeddings(disallowed_special=()) - pathName = github.split('/')[-1] - dataset_path = vector_db_url.value - - db = DeepLake(dataset_path=dataset_path, read_only=True, embedding_function=embeddings) - retriever = db.as_retriever() - retriever.search_kwargs['distance_metric'] = 'cos' - retriever.search_kwargs['fetch_k'] = 100 - retriever.search_kwargs['maximal_marginal_relevance'] = True - retriever.search_kwargs['k'] = 20 - - q = SimpleQueue() - model = ChatOpenAI( - model_name='gpt-3.5-turbo-16k', - temperature=0.0, - verbose=True, - streaming=True, # Pass `streaming=True` to make sure the client receives the data. - callback_manager=CallbackManager( - [StreamingGradioCallbackHandler(q)] - ), - ) - qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever, max_tokens_limit=16000, return_source_documents=True, get_chat_history=lambda h : h) - chat_history = [] - - except Exception as e: - print("Exception: " + str(e)) - return str(e) - - history[-1][1] = "" - - for char in qa({"question": user_message, "chat_history": []})["answer"]: - history[-1][1] += char - yield history - - -with gr.Blocks() as demo: - - # repoName = gr.State(value="https://github.com/sourcegraph/cody.git") - - gr.Markdown(""" -

Entelligence AI
Enabling your product team to ship product 10x faster.

- """) - - repoTextBox = gr.Textbox(label="Github Repository") - - gr.Markdown("""Choose from any of the following repositories""") - ingestedRepos = gr.CheckboxGroup(choices=['Langchain', 'Weaviate', 'OpenAssistant', 'GenerativeAgents','Llama2', "MemeAI"], label="Github Repository", value="Langchain") - - success_response = gr.Textbox(label="") - ingest_btn = gr.Button("Index repo") - - with gr.Column(visible=False) as launch_product: - - # Toggle visibility of the chat, bugs, docs, model windows - with gr.Tab("Code Chat"): - chatbot = gr.Chatbot() - msg = gr.Textbox() - clear = gr.Button("Clear") - - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - bot, chatbot, chatbot - ) - clear.click(lambda: None, None, chatbot, queue=False) - - - index = 0 - with gr.Tab("Bug Triage"): - - # Display the titles in the dropdown - def create_ticket_dropdown(): - print(git_titles.value) - return ticketDropdown.update( - choices=git_titles.value - ) - - ticketDropdown = gr.Dropdown(choices=[], title="Github Issues", interactive=True) - ticketDropdown.focus(create_ticket_dropdown, outputs=ticketDropdown) - chatbot = gr.Chatbot() - msg = gr.Textbox() - clear = gr.Button("Clear") - ticketDropdown.change(solveGithubIssue, inputs=[ticketDropdown, chatbot], outputs=[chatbot]) - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - bot, chatbot, chatbot - ) - clear.click(lambda: None, None, chatbot, queue=False) - - - # with gr.Tab("AI Code Documentation"): - - # repoName = repo_name.value - # # First parse through the folder structure and store that as a list of clickable buttons - # gr.Markdown(""" - # ## AI Generated Code Documentation - # Code documentation comes in 3 flavors - internal engineering, external API documentation and product documentation. Each offers different layers of abstraction over the code base. - # """) - - # # docs = generateDocumentationPerFolder("overview", repo_name) - - # # For now let's just display all of the docs in one big file - # allDocs = "" - # dirNames = generateFolderNamesForRepo(repoName[:-4]) - # for dir in dirNames: - # if dir[0] != ".": - # allDocs += generateDocumentationPerFolder(dir, repoName[:-4]) + '\n\n' - - # gr.Markdown(allDocs) - - # def button_click_callback(markdown): - # docs = generateDocumentationPerFolder("overview", repoName[:-4]) - # markdown.update(docs) - - # markdown = gr.Markdown() - # # Generate the left column buttons and their names and wrap each one in a function - # with gr.Row(): - # with gr.Column(scale=.5, min_width=300): - # dirNames = generateFolderNamesForRepo(repoName[:-4]) - # buttons = [gr.Button(folder_name) for folder_name in dirNames] - # for btn, folder_name in zip(buttons, dirNames): - # btn.click(button_click_callback, [markdown], [markdown] ) - - - # # Generate the overall documentation for the main bubble at the same time - # with gr.Column(scale=2, min_width=300): - # docs = generateDocumentationPerFolder("overview", repoName[:-4]) - # markdown.update(docs) - # # markdown.render() - - - with gr.Tab("Custom Model Finetuning"): - # First provide a summary of offering - gr.Markdown(""" - # Enterprise Custom Model Finetuning - Finetuning code generation models directly on your enterprise code base has shown up to 10% increase in model suggestion acceptance rate. 
- """) - - # Choose base model - radio with model size - gr.Radio(choices=["Santacoder (1.1B parameter model)", "Incoder (6B parameter model)", "Codegen (16B parameter model)", "Starcoder (15.5B parameter model)"] , value="Starcoder (15.5B parameter model)") - - # Choose existing code base or input a new code base for finetuning - - with gr.Row(): - gr.Markdown(""" - If you'd like to use the current code base, click this toggle otherwise input the entire code base below. - """) - existing_repo = gr.Checkbox(value=True, label="Use existing repository") - gr.Textbox(label="Input repository", visible=False) - - # Allow option to remove generated files etc etc - gr.Markdown(""" - Finetuned model performance is highly dependent on training data quality. We have currently found that excluding the following file types improves performance. If you'd like to include them, please toggle them. - """) - file_types = gr.CheckboxGroup(choices=['.bin', '.gen', '.git', '.gz','.jpg', '.lz', '.midi', '.mpq','.png', '.tz'], label="Removed file types") - - # Based on data above, we should show a field for estimated fine tuning cost - # Then we should show the chart for loss - def wandb_report(url): - iframe = f' - - - - \ No newline at end of file diff --git a/spaces/npc0/BookSumBeta/api.py b/spaces/npc0/BookSumBeta/api.py deleted file mode 100644 index 896a3cc7449a9dfe5c5ea1200a4ea4baf68d19f0..0000000000000000000000000000000000000000 --- a/spaces/npc0/BookSumBeta/api.py +++ /dev/null @@ -1,42 +0,0 @@ -import os - -from fastapi import FastAPI, WebSocket -from epub2txt import epub2txt - -from fastllm_pytools import llm - -model = llm.model(os.getenv("checkpoint_path")) -prompt = os.getenv("prompt") - -app = FastAPI() - - -@app.websocket("/ws") -async def read_root(websocket: WebSocket): - await websocket.accept() - f_name = await websocket.receive_text() - ch_list = epub2txt(f_name, outputlist=True) - chapter_titles = epub2txt.content_titles - title = epub2txt.title - - idx = 0 - sm_list = [] - for text in ch_list[2:]: - idx += 1 - docs = [] - for i in range(0, len(text)//2000+1, 2000): - t = text[i:i+2048] - if len(t) > 0: - docs.append(model.response(prompt+t)) - await websocket.send_text(f"chsum: {docs[-1]}") - hist = docs[0] - for doc in docs[1:]: - hist = model.response(prompt+"\n"+hist+"\n"+doc) - await websocket.send_text(f"draft_sum: {hist}") - sm_list.append(hist) - mdobj_str = f"# {title}\n\n{hist}\n\n\n" - for ct, sm in zip(chapter_titles[2:], sm_list): - mdobj_str += f"## {ct}\n\n{sm}\n\n\n" - await websocket.send_text(f"output: {mdobj_str}") - -# uvicorn api:app --reload \ No newline at end of file diff --git a/spaces/odettecantswim/rvc-mlbb-v2/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/odettecantswim/rvc-mlbb-v2/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index b412ba2814e114ca7bb00b6fd6ef217f63d788a3..0000000000000000000000000000000000000000 --- a/spaces/odettecantswim/rvc-mlbb-v2/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,86 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class HarvestF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = 
np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.hop_length, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/openflamingo/OpenFlamingo/open_flamingo/open_flamingo/eval/evaluate.py b/spaces/openflamingo/OpenFlamingo/open_flamingo/open_flamingo/eval/evaluate.py deleted file mode 100644 index 36793293664d2e8208735616f4a707ef703ff8cb..0000000000000000000000000000000000000000 --- a/spaces/openflamingo/OpenFlamingo/open_flamingo/open_flamingo/eval/evaluate.py +++ /dev/null @@ -1,1247 +0,0 @@ -import argparse -import importlib -import json -import os -import random -import uuid -from collections import defaultdict - -from einops import repeat -import more_itertools -import numpy as np -import torch -from sklearn.metrics import roc_auc_score - -from coco_metric import compute_cider, postprocess_captioning_generation -from eval_datasets import ( - CaptionDataset, - VQADataset, - ImageNetDataset, - HatefulMemesDataset, -) -from tqdm import tqdm - - -from eval_datasets import VQADataset, ImageNetDataset -from classification_utils import ( - IMAGENET_CLASSNAMES, - IMAGENET_1K_CLASS_ID_TO_LABEL, - HM_CLASSNAMES, - HM_CLASS_ID_TO_LABEL, -) - -from eval_model import BaseEvalModel - -from ok_vqa_utils import postprocess_ok_vqa_generation -from open_flamingo.src.flamingo import Flamingo -from vqa_metric import compute_vqa_accuracy, postprocess_vqa_generation - -from open_flamingo.train.distributed import init_distributed_device, world_info_from_env - -parser = argparse.ArgumentParser() - -parser.add_argument( - "--model", - type=str, - help="Model name. 
Currently only `OpenFlamingo` is supported.", - default="open_flamingo", -) -parser.add_argument( - "--results_file", type=str, default=None, help="JSON file to save results" -) - -# Trial arguments -parser.add_argument("--shots", nargs="+", default=[0, 4, 8, 16, 32], type=int) -parser.add_argument( - "--num_trials", - type=int, - default=1, - help="Number of trials to run for each shot using different demonstrations", -) -parser.add_argument( - "--trial_seeds", - nargs="+", - type=int, - default=[42], - help="Seeds to use for each trial for picking demonstrations and eval sets", -) -parser.add_argument( - "--num_samples", type=int, default=-1, help="Number of samples to evaluate on. -1 for all samples." -) -parser.add_argument( - "--query_set_size", type=int, default=2048, help="Size of demonstration query set" -) - -parser.add_argument("--batch_size", type=int, default=8) - -parser.add_argument("--use_kv_caching_for_classification", - action="store_true", - help="Use key-value caching for classification evals to speed it up. Currently this doesn't underperforms for MPT models." - ) - -# Per-dataset evaluation flags -parser.add_argument( - "--eval_coco", - action="store_true", - default=False, - help="Whether to evaluate on COCO.", -) -parser.add_argument( - "--eval_vqav2", - action="store_true", - default=False, - help="Whether to evaluate on VQAV2.", -) -parser.add_argument( - "--eval_ok_vqa", - action="store_true", - default=False, - help="Whether to evaluate on OK-VQA.", -) -parser.add_argument( - "--eval_vizwiz", - action="store_true", - default=False, - help="Whether to evaluate on VizWiz.", -) -parser.add_argument( - "--eval_textvqa", - action="store_true", - default=False, - help="Whether to evaluate on TextVQA.", -) -parser.add_argument( - "--eval_imagenet", - action="store_true", - default=False, - help="Whether to evaluate on ImageNet.", -) -parser.add_argument( - "--eval_flickr30", - action="store_true", - default=False, - help="Whether to evaluate on Flickr30.", -) -parser.add_argument( - "--eval_hateful_memes", - action="store_true", - default=False, - help="Whether to evaluate on Hateful Memes.", -) - -# Dataset arguments - -## Flickr30 Dataset -parser.add_argument( - "--flickr_image_dir_path", - type=str, - help="Path to the flickr30/flickr30k_images directory.", - default=None, -) -parser.add_argument( - "--flickr_karpathy_json_path", - type=str, - help="Path to the dataset_flickr30k.json file.", - default=None, -) -parser.add_argument( - "--flickr_annotations_json_path", - type=str, - help="Path to the dataset_flickr30k_coco_style.json file.", -) -## COCO Dataset -parser.add_argument( - "--coco_train_image_dir_path", - type=str, - default=None, -) -parser.add_argument( - "--coco_val_image_dir_path", - type=str, - default=None, -) -parser.add_argument( - "--coco_karpathy_json_path", - type=str, - default=None, -) -parser.add_argument( - "--coco_annotations_json_path", - type=str, - default=None, -) - -## VQAV2 Dataset -parser.add_argument( - "--vqav2_train_image_dir_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_train_questions_json_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_train_annotations_json_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_test_image_dir_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_test_questions_json_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_test_annotations_json_path", - type=str, - default=None, -) - -## OK-VQA Dataset 
-parser.add_argument( - "--ok_vqa_train_image_dir_path", - type=str, - help="Path to the vqav2/train2014 directory.", - default=None, -) -parser.add_argument( - "--ok_vqa_train_questions_json_path", - type=str, - help="Path to the v2_OpenEnded_mscoco_train2014_questions.json file.", - default=None, -) -parser.add_argument( - "--ok_vqa_train_annotations_json_path", - type=str, - help="Path to the v2_mscoco_train2014_annotations.json file.", - default=None, -) -parser.add_argument( - "--ok_vqa_test_image_dir_path", - type=str, - help="Path to the vqav2/val2014 directory.", - default=None, -) -parser.add_argument( - "--ok_vqa_test_questions_json_path", - type=str, - help="Path to the v2_OpenEnded_mscoco_val2014_questions.json file.", - default=None, -) -parser.add_argument( - "--ok_vqa_test_annotations_json_path", - type=str, - help="Path to the v2_mscoco_val2014_annotations.json file.", - default=None, -) - -## VizWiz Dataset -parser.add_argument( - "--vizwiz_train_image_dir_path", - type=str, - help="Path to the vizwiz train images directory.", - default=None, -) -parser.add_argument( - "--vizwiz_test_image_dir_path", - type=str, - help="Path to the vizwiz test images directory.", - default=None, -) -parser.add_argument( - "--vizwiz_train_questions_json_path", - type=str, - help="Path to the vizwiz questions json file.", - default=None, -) -parser.add_argument( - "--vizwiz_train_annotations_json_path", - type=str, - help="Path to the vizwiz annotations json file.", - default=None, -) -parser.add_argument( - "--vizwiz_test_questions_json_path", - type=str, - help="Path to the vizwiz questions json file.", - default=None, -) -parser.add_argument( - "--vizwiz_test_annotations_json_path", - type=str, - help="Path to the vizwiz annotations json file.", - default=None, -) - -# TextVQA Dataset -parser.add_argument( - "--textvqa_image_dir_path", - type=str, - help="Path to the textvqa images directory.", - default=None, -) -parser.add_argument( - "--textvqa_train_questions_json_path", - type=str, - help="Path to the textvqa questions json file.", - default=None, -) -parser.add_argument( - "--textvqa_train_annotations_json_path", - type=str, - help="Path to the textvqa annotations json file.", - default=None, -) -parser.add_argument( - "--textvqa_test_questions_json_path", - type=str, - help="Path to the textvqa questions json file.", - default=None, -) -parser.add_argument( - "--textvqa_test_annotations_json_path", - type=str, - help="Path to the textvqa annotations json file.", - default=None, -) - -## Imagenet dataset -parser.add_argument("--imagenet_root", type=str, default="/tmp") - -## Hateful Memes dataset -parser.add_argument( - "--hateful_memes_image_dir_path", - type=str, - default=None, -) -parser.add_argument( - "--hateful_memes_train_annotations_json_path", - type=str, - default=None, -) -parser.add_argument( - "--hateful_memes_test_annotations_json_path", - type=str, - default=None, -) - -# Distributed evaluation -parser.add_argument( - "--dist-url", - default="env://", - type=str, - help="url used to set up distributed training", -) -parser.add_argument( - "--dist-backend", default="nccl", type=str, help="distributed backend" -) -parser.add_argument( - "--horovod", - default=False, - action="store_true", - help="Use horovod for distributed training.", -) -parser.add_argument( - "--no-set-device-rank", - default=False, - action="store_true", - help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).", -) - - -def main(): - args, leftovers = 
parser.parse_known_args() - module = importlib.import_module(f"open_flamingo.eval.models.{args.model}") - - model_args = { - leftovers[i].lstrip("-"): leftovers[i + 1] for i in range(0, len(leftovers), 2) - } - eval_model = module.EvalModel(model_args) - - # set up distributed evaluation - args.local_rank, args.rank, args.world_size = world_info_from_env() - device_id = init_distributed_device(args) - eval_model.set_device(device_id) - eval_model.init_distributed() - - if args.model != "open_flamingo" and args.shots != [0]: - raise ValueError("Only 0 shot eval is supported for non-open_flamingo models") - - if len(args.trial_seeds) != args.num_trials: - raise ValueError("Number of trial seeds must be == number of trials.") - - results = defaultdict(list) - - if args.eval_flickr30: - print("Evaluating on Flickr30k...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - cider_score = evaluate_captioning( - args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - dataset_name="flickr", - min_generation_length=12, - max_generation_length=30, - num_beams=5, - ) - if args.rank == 0: - print(f"Shots {shot} Trial {trial} CIDEr score: {cider_score}") - scores.append(cider_score) - - if args.rank == 0: - print(f"Shots {shot} Mean CIDEr score: {np.nanmean(scores)}") - results["flickr30"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_coco: - print("Evaluating on COCO...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - cider_score = evaluate_captioning( - args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - dataset_name="coco", - ) - if args.rank == 0: - print(f"Shots {shot} Trial {trial} CIDEr score: {cider_score}") - scores.append(cider_score) - - if args.rank == 0: - print(f"Shots {shot} Mean CIDEr score: {np.nanmean(scores)}") - results["coco"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_ok_vqa: - print("Evaluating on OK-VQA...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - ok_vqa_score = evaluate_vqa( - args=args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - dataset_name="ok_vqa", - ) - if args.rank == 0: - print(f"Shots {shot} Trial {trial} OK-VQA score: {ok_vqa_score}") - scores.append(ok_vqa_score) - - if args.rank == 0: - print(f"Shots {shot} Mean OK-VQA score: {np.nanmean(scores)}") - results["ok_vqa"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_vqav2: - print("Evaluating on VQAv2...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - vqa_score = evaluate_vqa( - args=args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - dataset_name="vqav2", - ) - if args.rank == 0: - print(f"Shots {shot} Trial {trial} VQA score: {vqa_score}") - scores.append(vqa_score) - - if args.rank == 0: - print(f"Shots {shot} Mean VQA score: {np.nanmean(scores)}") - results["vqav2"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_vizwiz: - print("Evaluating on VizWiz...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - vizwiz_score = evaluate_vqa( - args=args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - dataset_name="vizwiz", - ) - if args.rank == 0: - print(f"Shots {shot} 
Trial {trial} VizWiz score: {vizwiz_score}") - scores.append(vizwiz_score) - - if args.rank == 0: - print(f"Shots {shot} Mean VizWiz score: {np.nanmean(scores)}") - results["vizwiz"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_textvqa: - print("Evaluating on TextVQA...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - textvqa_score = evaluate_vqa( - args=args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - dataset_name="textvqa", - max_generation_length=10, - ) - if args.rank == 0: - print(f"Shots {shot} Trial {trial} TextVQA score: {textvqa_score}") - scores.append(textvqa_score) - - if args.rank == 0: - print(f"Shots {shot} Mean TextVQA score: {np.nanmean(scores)}") - results["textvqa"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_imagenet: - print("Evaluating on ImageNet...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - imagenet_score = evaluate_classification( - args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - use_kv_caching=args.use_kv_caching_for_classification, - dataset_name="imagenet", - ) - if args.rank == 0: - print( - f"Shots {shot} Trial {trial} " f"ImageNet score: {imagenet_score}" - ) - scores.append(imagenet_score) - - if args.rank == 0: - print(f"Shots {shot} Mean ImageNet score: {np.nanmean(scores)}") - results["imagenet"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.eval_hateful_memes: - print("Evaluating on Hateful Memes...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - hateful_memes_score = evaluate_classification( - args, - eval_model=eval_model, - num_shots=shot, - seed=seed, - use_kv_caching=args.use_kv_caching_for_classification, - dataset_name="hateful_memes", - ) - if args.rank == 0: - print( - f"Shots {shot} Trial {trial} " - f"Hateful Memes score: {hateful_memes_score}" - ) - scores.append(hateful_memes_score) - - if args.rank == 0: - print(f"Shots {shot} Mean Hateful Memes score: {np.nanmean(scores)}") - results["hateful_memes"].append( - {"shots": shot, "trials": scores, "mean": np.nanmean(scores)} - ) - - if args.rank == 0 and args.results_file is not None: - with open(args.results_file, "w") as f: - json.dump(results, f) - - -def get_random_indices(num_samples, query_set_size, full_dataset, seed): - if num_samples + query_set_size > len(full_dataset): - raise ValueError( - f"num_samples + query_set_size must be less than {len(full_dataset)}" - ) - - # get a random subset of the dataset - np.random.seed(seed) - random_indices = np.random.choice( - len(full_dataset), num_samples + query_set_size, replace=False - ) - return random_indices - - -def get_query_set(train_dataset, query_set_size, seed): - np.random.seed(seed) - query_set = np.random.choice(len(train_dataset), query_set_size, replace=False) - return [train_dataset[i] for i in query_set] - - -def prepare_eval_samples(test_dataset, num_samples, batch_size, seed): - np.random.seed(seed) - random_indices = np.random.choice(len(test_dataset), num_samples, replace=False) - dataset = torch.utils.data.Subset(test_dataset, random_indices) - sampler = torch.utils.data.distributed.DistributedSampler(dataset) - loader = torch.utils.data.DataLoader( - dataset, - batch_size=batch_size, - sampler=sampler, - collate_fn=custom_collate_fn, - ) - return loader - - 
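# Illustrative sketch (not part of the original file): the DataLoader built by
# prepare_eval_samples above uses custom_collate_fn (defined below), so every
# batch it yields is a dict of lists rather than a list of dicts. The evaluation
# loops rely on that layout when they index batch["image"][i], batch["question"][i],
# and so on. The helper and its sample dicts are hypothetical and exist only to
# show the shape of a collated batch.
def _demo_batch_layout():
    samples = [
        {"image": "img_0", "question": "What is shown?"},
        {"image": "img_1", "question": "How many objects?"},
    ]
    batch = custom_collate_fn(samples)  # resolved at call time; defined later in this module
    # the list of per-sample dicts has been transposed into a dict of lists
    assert batch == {
        "image": ["img_0", "img_1"],
        "question": ["What is shown?", "How many objects?"],
    }
    return batch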
-def sample_batch_demos_from_query_set(query_set, num_samples, batch_size): - return [random.sample(query_set, num_samples) for _ in range(batch_size)] - - -def compute_effective_num_shots(num_shots, model_type): - if model_type == "open_flamingo": - return num_shots if num_shots > 0 else 2 - return num_shots - - -def custom_collate_fn(batch): - collated_batch = {} - for key in batch[0].keys(): - collated_batch[key] = [item[key] for item in batch] - return collated_batch - - -def evaluate_captioning( - args: argparse.Namespace, - eval_model: BaseEvalModel, - seed: int = 42, - min_generation_length: int = 0, - max_generation_length: int = 20, - num_beams: int = 3, - length_penalty: float = 0.0, - num_shots: int = 8, - dataset_name: str = "coco", -): - """Evaluate a model on COCO dataset. - - Args: - args (argparse.Namespace): arguments - eval_model (BaseEvalModel): model to evaluate - seed (int, optional): seed for random number generator. Defaults to 42. - max_generation_length (int, optional): maximum length of the generated caption. Defaults to 20. - num_beams (int, optional): number of beams to use for beam search. Defaults to 3. - length_penalty (float, optional): length penalty for beam search. Defaults to -2.0. - num_shots (int, optional): number of in-context samples to use. Defaults to 8. - dataset_name (str, optional): dataset to evaluate on. Can be "coco" or "flickr". Defaults to "coco". - Returns: - float: CIDEr score - - """ - - if dataset_name == "coco": - image_train_dir_path = args.coco_train_image_dir_path - image_val_dir_path = args.coco_val_image_dir_path - annotations_path = args.coco_karpathy_json_path - elif dataset_name == "flickr": - image_train_dir_path = ( - args.flickr_image_dir_path - ) # Note: calling this "train" for consistency with COCO but Flickr only has one split for images - image_val_dir_path = None - annotations_path = args.flickr_karpathy_json_path - else: - raise ValueError(f"Unsupported dataset: {dataset_name}") - - train_dataset = CaptionDataset( - image_train_dir_path=image_train_dir_path, - image_val_dir_path=image_val_dir_path, - annotations_path=annotations_path, - is_train=True, - dataset_name=dataset_name if dataset_name != "nocaps" else "coco", - ) - - test_dataset = CaptionDataset( - image_train_dir_path=image_train_dir_path, - image_val_dir_path=image_val_dir_path, - annotations_path=annotations_path, - is_train=False, - dataset_name=dataset_name, - ) - - effective_num_shots = compute_effective_num_shots(num_shots, args.model) - - test_dataset = prepare_eval_samples( - test_dataset, - args.num_samples if args.num_samples > 0 else len(test_dataset), - args.batch_size, - seed, - ) - - in_context_samples = get_query_set(train_dataset, args.query_set_size, seed) - - predictions = defaultdict() - - for batch in tqdm(test_dataset, desc=f"Running inference {dataset_name.upper()}"): - batch_demo_samples = sample_batch_demos_from_query_set( - in_context_samples, effective_num_shots, len(batch["image"]) - ) - - batch_images = [] - batch_text = [] - for i in range(len(batch["image"])): - if num_shots > 0: - context_images = [x["image"] for x in batch_demo_samples[i]] - else: - context_images = [] - batch_images.append(context_images + [batch["image"][i]]) - - context_text = "".join( - [ - eval_model.get_caption_prompt(caption=x["caption"].strip()) - for x in batch_demo_samples[i] - ] - ) - - # Keep the text but remove the image tags for the zero-shot case - if num_shots == 0: - context_text = context_text.replace("", "") - - 
batch_text.append(context_text + eval_model.get_caption_prompt()) - - outputs = eval_model.get_outputs( - batch_images=batch_images, - batch_text=batch_text, - min_generation_length=min_generation_length, - max_generation_length=max_generation_length, - num_beams=num_beams, - length_penalty=length_penalty, - ) - - new_predictions = [ - postprocess_captioning_generation(out).replace('"', "") for out in outputs - ] - - for i, sample_id in enumerate(batch["image_id"]): - predictions[sample_id] = { - "caption": new_predictions[i], - } - - # all gather - all_predictions = [None] * args.world_size - torch.distributed.all_gather_object(all_predictions, predictions) # list of dicts - - if args.rank != 0: - return - - all_predictions = { - k: v for d in all_predictions for k, v in d.items() - } # merge dicts - - # save the predictions to a temporary file - results_path = f"{dataset_name}results_{uuid.uuid4()}.json" - - with open(results_path, "w") as f: - f.write( - json.dumps( - [ - {"image_id": k, "caption": all_predictions[k]["caption"]} - for k in all_predictions - ], - indent=4, - ) - ) - - metrics = compute_cider( - result_path=results_path, - annotations_path=args.coco_annotations_json_path - if dataset_name == "coco" - else args.flickr_annotations_json_path, - ) - - # delete the temporary file - os.remove(results_path) - - return metrics["CIDEr"] * 100.0 - - -def evaluate_vqa( - args: argparse.Namespace, - eval_model: BaseEvalModel, - seed: int = 42, - min_generation_length: int = 0, - max_generation_length: int = 5, - num_beams: int = 3, - length_penalty: float = -2.0, - num_shots: int = 8, - dataset_name: str = "vqav2", -): - """ - Evaluate a model on VQA datasets. Currently supports VQA v2.0, OK-VQA, VizWiz and TextVQA. - - Args: - args (argparse.Namespace): arguments - eval_model (BaseEvalModel): model to evaluate - seed (int, optional): random seed. Defaults to 42. - max_generation_length (int, optional): max generation length. Defaults to 5. - num_beams (int, optional): number of beams to use for beam search. Defaults to 3. - length_penalty (float, optional): length penalty for beam search. Defaults to -2.0. - num_shots (int, optional): number of shots to use. Defaults to 8. - dataset_name (string): type of vqa dataset: currently supports vqav2, ok_vqa. Defaults to vqav2. 
- Returns: - float: accuracy score - """ - - if dataset_name == "ok_vqa": - train_image_dir_path = args.ok_vqa_train_image_dir_path - train_questions_json_path = args.ok_vqa_train_questions_json_path - train_annotations_json_path = args.ok_vqa_train_annotations_json_path - test_image_dir_path = args.ok_vqa_test_image_dir_path - test_questions_json_path = args.ok_vqa_test_questions_json_path - test_annotations_json_path = args.ok_vqa_test_annotations_json_path - elif dataset_name == "vqav2": - train_image_dir_path = args.vqav2_train_image_dir_path - train_questions_json_path = args.vqav2_train_questions_json_path - train_annotations_json_path = args.vqav2_train_annotations_json_path - test_image_dir_path = args.vqav2_test_image_dir_path - test_questions_json_path = args.vqav2_test_questions_json_path - test_annotations_json_path = args.vqav2_test_annotations_json_path - elif dataset_name == "vizwiz": - train_image_dir_path = args.vizwiz_train_image_dir_path - train_questions_json_path = args.vizwiz_train_questions_json_path - train_annotations_json_path = args.vizwiz_train_annotations_json_path - test_image_dir_path = args.vizwiz_test_image_dir_path - test_questions_json_path = args.vizwiz_test_questions_json_path - test_annotations_json_path = args.vizwiz_test_annotations_json_path - elif dataset_name == "textvqa": - train_image_dir_path = args.textvqa_image_dir_path - train_questions_json_path = args.textvqa_train_questions_json_path - train_annotations_json_path = args.textvqa_train_annotations_json_path - test_image_dir_path = args.textvqa_image_dir_path - test_questions_json_path = args.textvqa_test_questions_json_path - test_annotations_json_path = args.textvqa_test_annotations_json_path - else: - raise ValueError(f"Unsupported dataset: {dataset_name}") - - train_dataset = VQADataset( - image_dir_path=train_image_dir_path, - question_path=train_questions_json_path, - annotations_path=train_annotations_json_path, - is_train=True, - dataset_name=dataset_name, - ) - - test_dataset = VQADataset( - image_dir_path=test_image_dir_path, - question_path=test_questions_json_path, - annotations_path=test_annotations_json_path, - is_train=False, - dataset_name=dataset_name, - ) - - effective_num_shots = compute_effective_num_shots(num_shots, args.model) - - test_dataset = prepare_eval_samples( - test_dataset, - args.num_samples if args.num_samples > 0 else len(test_dataset), - args.batch_size, - seed, - ) - - in_context_samples = get_query_set(train_dataset, args.query_set_size, seed) - predictions = [] - - for batch in tqdm(test_dataset, desc=f"Running inference {dataset_name.upper()}"): - batch_demo_samples = sample_batch_demos_from_query_set( - in_context_samples, effective_num_shots, len(batch["image"]) - ) - - batch_images = [] - batch_text = [] - for i in range(len(batch["image"])): - if num_shots > 0: - context_images = [x["image"] for x in batch_demo_samples[i]] - else: - context_images = [] - batch_images.append(context_images + [batch["image"][i]]) - - context_text = "".join( - [ - eval_model.get_vqa_prompt( - question=x["question"], answer=x["answers"][0] - ) - for x in batch_demo_samples[i] - ] - ) - - # Keep the text but remove the image tags for the zero-shot case - if num_shots == 0: - context_text = context_text.replace("", "") - - batch_text.append( - context_text + eval_model.get_vqa_prompt(question=batch["question"][i]) - ) - - outputs = eval_model.get_outputs( - batch_images=batch_images, - batch_text=batch_text, - min_generation_length=min_generation_length, - 
max_generation_length=max_generation_length, - num_beams=num_beams, - length_penalty=length_penalty, - ) - - process_function = ( - postprocess_ok_vqa_generation - if dataset_name == "ok_vqa" - else postprocess_vqa_generation - ) - - new_predictions = map(process_function, outputs) - - for new_prediction, sample_id in zip(new_predictions, batch["question_id"]): - predictions.append({"answer": new_prediction, "question_id": sample_id}) - - # all gather - all_predictions = [None] * args.world_size - torch.distributed.all_gather_object(all_predictions, predictions) # list of lists - if args.rank != 0: - return - - all_predictions = [ - item for sublist in all_predictions for item in sublist - ] # flatten - - # save the predictions to a temporary file - random_uuid = str(uuid.uuid4()) - with open(f"{dataset_name}results_{random_uuid}.json", "w") as f: - f.write(json.dumps(all_predictions, indent=4)) - - if test_annotations_json_path is not None: - acc = compute_vqa_accuracy( - f"{dataset_name}results_{random_uuid}.json", - test_questions_json_path, - test_annotations_json_path, - ) - # delete the temporary file - os.remove(f"{dataset_name}results_{random_uuid}.json") - - else: - print("No annotations provided, skipping accuracy computation.") - print("Temporary file saved to:", f"{dataset_name}results_{random_uuid}.json") - acc = None - - return acc - - -def evaluate_classification( - args: argparse.Namespace, - eval_model, - seed: int = 42, - num_shots: int = 8, - use_kv_caching=False, - dataset_name: str = "imagenet", -): - """ - Evaluate a model on classification dataset. - - Args: - eval_model (BaseEvalModel): model to evaluate - imagenet_root (str): path to imagenet root for the specified split. - seed (int, optional): random seed. Defaults to 42. - num_shots (int, optional): number of shots to use. Defaults to 8. - dataset_name (str, optional): dataset name. Defaults to "imagenet". - - Returns: - float: accuracy score - """ - if args.model != "open_flamingo": - raise NotImplementedError( - "evaluate_classification is currently only supported for OpenFlamingo " - "models" - ) - batch_size = args.batch_size - num_samples = args.num_samples - np.random.seed(seed) - model, tokenizer = eval_model.model, eval_model.tokenizer - - if dataset_name == "imagenet": - train_dataset = ImageNetDataset(os.path.join(args.imagenet_root, "train")) - test_dataset = ImageNetDataset(os.path.join(args.imagenet_root, "val")) - elif dataset_name == "hateful_memes": - train_dataset = HatefulMemesDataset( - args.hateful_memes_image_dir_path, - args.hateful_memes_train_annotations_json_path, - ) - test_dataset = HatefulMemesDataset( - args.hateful_memes_image_dir_path, - args.hateful_memes_test_annotations_json_path, - ) - else: - raise ValueError(f"Unsupported dataset {dataset_name}") - - effective_num_shots = compute_effective_num_shots(num_shots, args.model) - - test_dataloader = prepare_eval_samples( - test_dataset, - args.num_samples if args.num_samples > 0 else len(test_dataset), - batch_size, - seed, - ) - - acc1 = 0 - acc5 = 0 - - if dataset_name == "imagenet": - prompt_text = "Output:" - elif dataset_name == "hateful_memes": - prompt_text = "is an image with: '{meme_text}' written on it. Is it hateful? 
Answer: " - - predictions = [] - - for batch_idx, batch in tqdm( - enumerate(test_dataloader), desc=f"Running inference {dataset_name}", disable=args.rank != 0 - ): - batch_images = [] - batch_text = [] - - for idx in range(len(batch["image"])): - # Choose a different set of random context samples for each sample - # from the training set - context_indices = np.random.choice( - len(train_dataset), effective_num_shots, replace=False - ) - - in_context_samples = [train_dataset[i] for i in context_indices] - - if num_shots > 0: - vision_x = [ - eval_model.image_processor(data["image"]).unsqueeze(0) - for data in in_context_samples - ] - else: - vision_x = [] - - vision_x = vision_x + [eval_model.image_processor(batch["image"][idx]).unsqueeze(0)] - batch_images.append(torch.cat(vision_x, dim=0)) - - def sample_to_prompt(sample): - if dataset_name == "hateful_memes": - return prompt_text.replace("{meme_text}", sample["ocr"]) - else: - return prompt_text - - context_text = "".join( - f"{sample_to_prompt(in_context_samples[i])}{in_context_samples[i]['class_name']}<|endofchunk|>" - for i in range(effective_num_shots) - ) - - # Keep the text but remove the image tags for the zero-shot case - if num_shots == 0: - context_text = context_text.replace("", "") - - batch_text.append(context_text) - - # shape [B, T_img, C, h, w] - vision_x = torch.stack(batch_images, dim=0) - # shape [B, T_img, 1, C, h, w] where 1 is the frame dimension - vision_x = vision_x.unsqueeze(2) - - # Cache the context text: tokenize context and prompt, - # e.g. ' a picture of a ' - text_x = [ - context_text - + sample_to_prompt({k: batch[k][idx] for k in batch.keys()}) - for idx, context_text in enumerate(batch_text) - ] - - ctx_and_prompt_tokenized = tokenizer( - text_x, - return_tensors="pt", - padding="longest", - max_length=2000, - ) - - ctx_and_prompt_input_ids = ctx_and_prompt_tokenized["input_ids"].to(eval_model.device) - ctx_and_prompt_attention_mask = ctx_and_prompt_tokenized["attention_mask"].to(eval_model.device).bool() - - def _detach_pkvs(pkvs): - """Detach a set of past key values.""" - return list([tuple([x.detach() for x in inner]) for inner in pkvs]) - - if use_kv_caching: - eval_model.cache_media(input_ids=ctx_and_prompt_input_ids, vision_x=vision_x.to(eval_model.device)) - - with torch.no_grad(): - precomputed = eval_model.model( - vision_x=None, - lang_x=ctx_and_prompt_input_ids, - attention_mask=ctx_and_prompt_attention_mask, - clear_conditioned_layers=False, - use_cache=True, - ) - - precomputed_pkvs = _detach_pkvs(precomputed.past_key_values) - precomputed_logits = precomputed.logits.detach() - else: - precomputed_pkvs = None - precomputed_logits = None - - if dataset_name == "imagenet": - all_class_names = IMAGENET_CLASSNAMES - else: - all_class_names = HM_CLASSNAMES - - if dataset_name == "imagenet": - class_id_to_name = IMAGENET_1K_CLASS_ID_TO_LABEL - else: - class_id_to_name = HM_CLASS_ID_TO_LABEL - - overall_probs = [] - for class_name in all_class_names: - past_key_values = None - # Tokenize only the class name and iteratively decode the model's - # predictions for this class. 
- classname_tokens = tokenizer( - class_name, add_special_tokens=False, return_tensors="pt" - )["input_ids"].to(eval_model.device) - - if classname_tokens.ndim == 1: # Case: classname is only 1 token - classname_tokens = torch.unsqueeze(classname_tokens, 1) - - classname_tokens = repeat( - classname_tokens, "b s -> (repeat b) s", repeat=len(batch_text) - ) - - if use_kv_caching: - # Compute the outputs one token at a time, using cached - # activations. - - # Initialize the elementwise predictions with the last set of - # logits from precomputed; this will correspond to the predicted - # probability of the first position/token in the imagenet - # classname. We will append the logits for each token to this - # list (each element has shape [B, 1, vocab_size]). - elementwise_logits = [precomputed_logits[:, -2:-1, :]] - - for token_idx in range(classname_tokens.shape[1]): - _lang_x = classname_tokens[:, token_idx].reshape((-1, 1)) - outputs = eval_model.get_logits( - lang_x=_lang_x, - past_key_values=( - past_key_values if token_idx > 0 else precomputed_pkvs - ), - clear_conditioned_layers=False, - ) - past_key_values = _detach_pkvs(outputs.past_key_values) - elementwise_logits.append(outputs.logits.detach()) - - # logits/probs has shape [B, classname_tokens + 1, vocab_size] - logits = torch.concat(elementwise_logits, 1) - probs = torch.softmax(logits, dim=-1) - - # collect the probability of the generated token -- probability - # at index 0 corresponds to the token at index 1. - probs = probs[:, :-1, :] # shape [B, classname_tokens, vocab_size] - - gen_probs = torch.gather(probs, 2, classname_tokens[:, :, None]).squeeze(-1).cpu() - - class_prob = torch.prod(gen_probs, 1).numpy() - else: - # Compute the outputs without using cached - # activations. - - # contatenate the class name tokens to the end of the context - # tokens - _lang_x = torch.cat([ctx_and_prompt_input_ids, classname_tokens], dim=1) - _attention_mask = torch.cat( - [ - ctx_and_prompt_attention_mask, - torch.ones_like(classname_tokens).bool(), - ], - dim=1, - ) - - outputs = eval_model.get_logits( - vision_x=vision_x.to(eval_model.device), - lang_x=_lang_x.to(eval_model.device), - attention_mask=_attention_mask.to(eval_model.device), - clear_conditioned_layers=True, - ) - - logits = outputs.logits.detach().float() - probs = torch.softmax(logits, dim=-1) - - # get probability of the generated class name tokens - gen_probs = probs[:, ctx_and_prompt_input_ids.shape[1]-1:_lang_x.shape[1], :] - gen_probs = torch.gather(gen_probs, 2, classname_tokens[:, :, None]).squeeze(-1).cpu() - class_prob = torch.prod(gen_probs, 1).numpy() - - overall_probs.append(class_prob) - - overall_probs = np.row_stack(overall_probs).T # shape [B, num_classes] - - eval_model.uncache_media() - - def topk(probs_ary: np.ndarray, k: int) -> np.ndarray: - """Return the indices of the top k elements in probs_ary.""" - return np.argsort(probs_ary)[::-1][:k] - - for i in range(len(batch_text)): - highest_prob_idxs = topk(overall_probs[i], 5) - - top5 = [class_id_to_name[pred] for pred in highest_prob_idxs] - - y_i = batch["class_name"][i] - acc5 += int(y_i in set(top5)) - acc1 += int(y_i == top5[0]) - - if dataset_name == "hateful_memes": - # sum over the probabilities of the different classes - binary_probs = [overall_probs[i][0] + overall_probs[i][3], overall_probs[i][1] + overall_probs[i][2]] - - predictions.append({ - "id": batch["id"][i], - "gt_label": y_i, - "pred_label": top5[0], - "pred_score": binary_probs[highest_prob_idxs[0]] if dataset_name == 
"hateful_memes" else None, # only for hateful memes - }) - - # all gather - all_predictions = [None] * args.world_size - torch.distributed.all_gather_object(all_predictions, predictions) # list of lists - if args.rank != 0: - return - - all_predictions = [ - item for sublist in all_predictions for item in sublist - ] # flatten - - # Hack to remove samples with duplicate ids (only necessary for multi-GPU evaluation) - all_predictions = {pred["id"]: pred for pred in all_predictions}.values() - - assert len(all_predictions) == len(test_dataset) # sanity check - - if dataset_name == "hateful_memes": - # return ROC-AUC score - gts = [pred["gt_label"] for pred in all_predictions] - pred_scores = [pred["pred_score"] for pred in all_predictions] - return roc_auc_score(gts, pred_scores) - else: - # return top-1 accuracy - acc1 = sum( - int(pred["gt_label"] == pred["pred_label"]) - for pred in all_predictions - ) - return float(acc1) / len(all_predictions) - -if __name__ == "__main__": - main() diff --git a/spaces/osanseviero/test/app.py b/spaces/osanseviero/test/app.py deleted file mode 100644 index ef871ec6520e510d4885700152b14e01d410827b..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/test/app.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr - -from gradio.mix import Series - -description = "Generate your own D&D story!" -title = "French Story Generator using Opus MT and GPT-2" - -translator_fr = gr.Interface.load("huggingface/Helsinki-NLP/opus-mt-fr-en") - - -story_gen = gr.Interface.load("huggingface/pranavpsv/gpt2-genre-story-generator") - -translator_en = gr.Interface.load("huggingface/Helsinki-NLP/opus-mt-en-fr") - -Series(translator_fr, story_gen, translator_en, description = description, -title = title, -examples=[["L'aventurier est approché par un mystérieux étranger, pour une nouvelle quête."]]).launch() \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/dance_diffusion/__init__.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/dance_diffusion/__init__.py deleted file mode 100644 index c777d437060c3a22900d4504c430c899467b2ceb..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/dance_diffusion/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import TYPE_CHECKING - -from ...utils import _LazyModule - - -_import_structure = {"pipeline_dance_diffusion": ["DanceDiffusionPipeline"]} - -if TYPE_CHECKING: - from .pipeline_dance_diffusion import DanceDiffusionPipeline -else: - import sys - - sys.modules[__name__] = _LazyModule( - __name__, - globals()["__file__"], - _import_structure, - module_spec=__spec__, - ) diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/optimize_residuals.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/optimize_residuals.py deleted file mode 100644 index aed25bdf17afc84b55a5dc71c7bcdc015f0d14ea..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/optimize_residuals.py +++ /dev/null @@ -1,270 +0,0 @@ -import torch, multiprocessing, itertools, os, shutil, PIL, argparse, numpy -from collections import OrderedDict -from numbers import Number -from torch.nn.functional import mse_loss, l1_loss -from . import pbar -from . import zdataset -from . import proggan, customnet, parallelfolder -from . 
import encoder_net, encoder_loss, setting -from torchvision import transforms, models -from torchvision.models.vgg import model_urls -from .pidfile import exit_if_job_done, mark_job_done -from . import nethook -from .pidfile import exit_if_job_done, mark_job_done -from .encoder_loss import cor_square_error -from .nethook import InstrumentedModel - -torch.backends.cudnn.benchmark = True - -parser = argparse.ArgumentParser() -parser.add_argument('--lr', type=float, help='Learning rate', default=0.01) -parser.add_argument('--image_number', type=int, help='Image number', - default=95) -parser.add_argument('--image_source', #choices=['val', 'train', 'gan', 'test'], - default='test') -parser.add_argument('--redo', type=int, help='Nonzero to delete done.txt', - default=0) -parser.add_argument('--residuals', nargs='*', help='Residuals to adjust', - default=None) -parser.add_argument('--model', type=str, help='Dataset being modeled', - default='church') -parser.add_argument('--halfsize', type=int, - help='Set to 1 for half size enoder', - default=0) -parser.add_argument('--snapshot_every', type=int, - help='only generate snapshots every n iterations', - default=1000) -args = parser.parse_args() - -num_steps = 3000 -lr_milestones = [800, 1200, 1800] -residuals = (args.residuals if args.residuals is not None - else ['layer1', 'layer2', 'layer3']) -global_seed = 1 -learning_rate = args.lr -image_number = args.image_number -expgroup = 'optimize_residuals' -# Use an explicit directory name for a different selection of residuals. -if args.residuals is not None: - expgroup += '_' + '_'.join(residuals) -imagetypecode = (dict(val='i', train='n', gan='z', test='t') - .get(args.image_source, args.image_source[0])) -expname = 'opt_%s_%d' % (imagetypecode, image_number) -expdir = os.path.join('results', args.model, expgroup, 'cases', expname) -sumdir = os.path.join('results', args.model, expgroup, - 'summary_%s' % imagetypecode) -os.makedirs(expdir, exist_ok=True) -os.makedirs(sumdir, exist_ok=True) - -# First load single image optimize (load via test ParallelFolder dataset). - -def main(): - pbar.print('Running %s' % expdir) - delete_log() - - # Grab a target image - dirname = os.path.join(expdir, 'images') - os.makedirs(dirname, exist_ok=True) - loaded_image, loaded_z = setting.load_test_image(image_number, - args.image_source, model=args.model) - visualize_results((image_number, 'target'), - loaded_image[0], summarize=True) - - # Load the pretrained generator model. - gan_generator = setting.load_proggan(args.model) - # We will wrap this model - unwrapped_H = nethook.subsequence(gan_generator, last_layer='layer4') - # Edit the output of this layer - F = nethook.subsequence(gan_generator, first_layer='layer5') - - # Load a pretrained gan inverter - encoder = nethook.InstrumentedModel( - encoder_net.HybridLayerNormEncoder(halfsize=args.halfsize)) - encoder.load_state_dict(torch.load(os.path.join('results', args.model, - 'invert_hybrid_cse/snapshots/epoch_1000.pth.tar'))['state_dict']) - encoder.eval() - E = nethook.subsequence(encoder.model, last_layer='resnet') - D = nethook.subsequence(encoder.model, first_layer='inv4') - - # Also make a conv features model from pretrained VGG - vgg = models.vgg16(pretrained=True) - VF = nethook.subsequence(vgg.features, last_layer='20') - - # Move models and data to GPU - for m in [F, unwrapped_H, E, D, VF]: - m.cuda() - - # Some constants for the GPU - with torch.no_grad(): - # Our true image is constant - true_p = loaded_image.cuda() - # Invert our image once! 
- init_r = E(true_p) - init_z = D(init_r) - # Compute our features once! - true_v = VF(true_p) - # For GAN-generated images we have ground truth. - if loaded_z is None: - true_z = None - true_r = None - true_r1, true_r2, true_r3 = None, None, None - else: - true_z = loaded_z.cuda() - with InstrumentedModel(unwrapped_H) as inst_H: - inst_H.retain_layers(['layer1', 'layer2', 'layer3']) - true_r = inst_H(true_z) - true_r1, true_r2, true_r3 = [inst_H.retained_layer(n) - for n in ['layer1', 'layer2', 'layer3']] - - # The model we learn are the top-level parameters of this wrapped model. - H = encoder_net.ResidualGenerator( - unwrapped_H, init_z, residuals) - H.eval() - H.cuda() - - # Set up optimizer - set_requires_grad(False, F, H, E, D, VF) - parameters = OrderedDict(H.named_parameters(recurse=False)) - for n, p in parameters.items(): - p.requires_grad = True - optimizer = torch.optim.Adam(parameters.values(), lr=learning_rate) - scheduler = torch.optim.lr_scheduler.MultiStepLR( - optimizer, milestones=lr_milestones, gamma=0.5) - - # Phase 1: find a better r4 by seeking d1, d2, d3, etc. - for step_num in pbar(range(num_steps + 1)): - current_r = H() - current_p = F(current_r) - current_v = VF(current_p) - - loss_p = l1_loss(true_p, current_p) - loss_v = l1_loss(true_v, current_v) - loss_z = H.dz.pow(2).mean() if hasattr(H, 'dz') else 0 - loss_1 = H.d1.pow(2).mean() if hasattr(H, 'd1') else 0 - loss_2 = H.d2.pow(2).mean() if hasattr(H, 'd2') else 0 - loss_3 = H.d3.pow(2).mean() if hasattr(H, 'd3') else 0 - loss_4 = H.d4.pow(2).mean() if hasattr(H, 'd4') else 0 - loss_r = mse_loss(init_r, current_r) - loss = (loss_p + loss_v + loss_z + loss_1 + loss_2 + loss_3 + loss_4) - - all_loss = dict(loss=loss, loss_v=loss_v, loss_p=loss_p, - loss_r=loss_r, - loss_z=loss_z, - loss_1=loss_1, - loss_2=loss_2, - loss_3=loss_3, - loss_4=loss_4 - ) - all_loss = { k: v.item() for k, v in all_loss.items() - if v is not 0 } - - if (step_num % args.snapshot_every == 0) or (step_num == num_steps): - with torch.no_grad(): - if true_r is not None: - all_loss['err_r'] = cor_square_error(current_r, true_r - ) * 100 - all_loss['err_p'] = (current_p - true_p).pow(2).mean() * 100 - log_progress('%d ' % step_num + ' '.join( - '%s=%.3f' % (k, all_loss[k]) - for k in sorted(all_loss.keys())), phase='a') - visualize_results((image_number, 'a', step_num), current_p, - summarize=(step_num in [0, num_steps])) - checkpoint_dict = OrderedDict(all_loss) - for s in residuals: - s = s.replace('layer', '') - checkpoint_dict['init_%s' % s] = getattr(H, 'init_' + s) - checkpoint_dict['d_%s' % s] = getattr(H, 'd' + s) - checkpoint_dict['current_%s' % s] = ( - getattr(H, 'init_' + s) + getattr(H, 'd' + s)) - save_checkpoint( - phase='a', - step=step_num, - current_r=current_r, - current_p=current_p, - true_z=true_z, - true_r=true_r, - true_p=true_p, - lr=learning_rate, - optimizer=optimizer.state_dict(), - **checkpoint_dict) - - optimizer.zero_grad() - loss.backward() - if step_num < num_steps: - optimizer.step() - scheduler.step() - -def delete_log(): - try: - os.remove(os.path.join(expdir, 'log.txt')) - except: - pass - -def log_progress(s, phase='a'): - with open(os.path.join(expdir, 'log.txt'), 'a') as f: - f.write(phase + ' ' + s + '\n') - pbar.print(s) - -def save_checkpoint(**kwargs): - dirname = os.path.join(expdir, 'snapshots') - os.makedirs(dirname, exist_ok=True) - filename = 'step_%s_%d.pth.tar' % (kwargs['phase'], kwargs['step']) - torch.save(kwargs, os.path.join(dirname, filename)) - # Also save as .mat file for analysis. 
- numeric_data = { - k: v.detach().cpu().numpy() if isinstance(v, torch.Tensor) else v - for k, v in kwargs.items() - if isinstance(v, (Number, numpy.ndarray, torch.Tensor))} - filename = 'step_%s_%d.npz' % (kwargs['phase'], kwargs['step']) - numpy.savez(os.path.join(dirname, filename), **numeric_data) - -def visualize_results(step, img, summarize=False): - if isinstance(step, tuple): - filename = '%s.png' % ('_'.join(str(i) for i in step)) - else: - filename = '%s.png' % str(step) - dirname = os.path.join(expdir, 'images') - os.makedirs(dirname, exist_ok=True) - save_tensor_image(img, os.path.join(dirname, filename)) - lbname = os.path.join(dirname, '+lightbox.html') - if not os.path.exists(lbname): - shutil.copy(os.path.join(os.path.dirname(__file__), - 'lightbox.html'), lbname) - if summarize: - save_tensor_image(img, os.path.join(sumdir, filename)) - lbname = os.path.join(sumdir, '+lightbox.html') - if not os.path.exists(lbname): - shutil.copy(os.path.join(os.path.dirname(__file__), - 'lightbox.html'), lbname) - -def save_tensor_image(img, filename): - if len(img.shape) == 4: - img = img[0] - np_data = ((img.permute(1, 2, 0) / 2 + 0.5) * 255 - ).clamp(0, 255).byte().cpu().numpy() - PIL.Image.fromarray(np_data).save(filename) - -def set_requires_grad(requires_grad, *models): - for model in models: - if isinstance(model, torch.nn.Module): - for param in model.parameters(): - param.requires_grad = requires_grad - elif isintance(model, torch.nn.Parameter): - model.requires_grad = requires_grad - else: - assert False, 'unknown type %r' % type(model) - -def edit(x): - x = x.clone() - x[:,EDIT_UNITS] = 0 - return x - -#unit_level99 = {} -#for cls in ablation_units: -# corpus = numpy.load('reltest/churchoutdoor/layer4/ace/%s/corpus.npz' % cls) - - -if __name__ == '__main__': - exit_if_job_done(expdir, redo=args.redo) - main() - mark_job_done(expdir) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/compat.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/compat.py deleted file mode 100644 index 1fe3d225acb9bf37acffafc2198dc96c7c7fd313..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/compat.py +++ /dev/null @@ -1,1116 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2017 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -from __future__ import absolute_import - -import os -import re -import sys - -try: - import ssl -except ImportError: # pragma: no cover - ssl = None - -if sys.version_info[0] < 3: # pragma: no cover - from StringIO import StringIO - string_types = basestring, - text_type = unicode - from types import FileType as file_type - import __builtin__ as builtins - import ConfigParser as configparser - from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit - from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, - pathname2url, ContentTooShortError, splittype) - - def quote(s): - if isinstance(s, unicode): - s = s.encode('utf-8') - return _quote(s) - - import urllib2 - from urllib2 import (Request, urlopen, URLError, HTTPError, - HTTPBasicAuthHandler, HTTPPasswordMgr, - HTTPHandler, HTTPRedirectHandler, - build_opener) - if ssl: - from urllib2 import HTTPSHandler - import httplib - import xmlrpclib - import Queue as queue - from HTMLParser import HTMLParser - import htmlentitydefs - raw_input = raw_input - from itertools import ifilter as filter - from itertools import ifilterfalse as filterfalse - - # Leaving this around for now, in case it needs resurrecting in some way - # _userprog = None - # def splituser(host): - # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - # global _userprog - # if _userprog is None: - # import re - # _userprog = re.compile('^(.*)@(.*)$') - - # match = _userprog.match(host) - # if match: return match.group(1, 2) - # return None, host - -else: # pragma: no cover - from io import StringIO - string_types = str, - text_type = str - from io import TextIOWrapper as file_type - import builtins - import configparser - import shutil - from urllib.parse import (urlparse, urlunparse, urljoin, quote, - unquote, urlsplit, urlunsplit, splittype) - from urllib.request import (urlopen, urlretrieve, Request, url2pathname, - pathname2url, - HTTPBasicAuthHandler, HTTPPasswordMgr, - HTTPHandler, HTTPRedirectHandler, - build_opener) - if ssl: - from urllib.request import HTTPSHandler - from urllib.error import HTTPError, URLError, ContentTooShortError - import http.client as httplib - import urllib.request as urllib2 - import xmlrpc.client as xmlrpclib - import queue - from html.parser import HTMLParser - import html.entities as htmlentitydefs - raw_input = input - from itertools import filterfalse - filter = filter - - -try: - from ssl import match_hostname, CertificateError -except ImportError: # pragma: no cover - class CertificateError(ValueError): - pass - - - def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - parts = dn.split('.') - leftmost, remainder = parts[0], parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. 
- if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - - def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. - if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") - - -try: - from types import SimpleNamespace as Container -except ImportError: # pragma: no cover - class Container(object): - """ - A generic container for when multiple values need to be returned - """ - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - -try: - from shutil import which -except ImportError: # pragma: no cover - # Implementation from Python 3.3 - def which(cmd, mode=os.F_OK | os.X_OK, path=None): - """Given a command, mode, and a PATH string, return the path which - conforms to the given mode on the PATH, or None if there is no such - file. - - `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result - of os.environ.get("PATH"), or can be overridden with a custom search - path. - - """ - # Check that a given file can be accessed with the correct mode. - # Additionally check that `file` is not a directory, as on Windows - # directories pass the os.access check. - def _access_check(fn, mode): - return (os.path.exists(fn) and os.access(fn, mode) - and not os.path.isdir(fn)) - - # If we're given a path with a directory part, look it up directly rather - # than referring to PATH directories. This includes checking relative to the - # current directory, e.g. 
./script - if os.path.dirname(cmd): - if _access_check(cmd, mode): - return cmd - return None - - if path is None: - path = os.environ.get("PATH", os.defpath) - if not path: - return None - path = path.split(os.pathsep) - - if sys.platform == "win32": - # The current directory takes precedence on Windows. - if not os.curdir in path: - path.insert(0, os.curdir) - - # PATHEXT is necessary to check on Windows. - pathext = os.environ.get("PATHEXT", "").split(os.pathsep) - # See if the given file matches any of the expected path extensions. - # This will allow us to short circuit when given "python.exe". - # If it does match, only test that one, otherwise we have to try - # others. - if any(cmd.lower().endswith(ext.lower()) for ext in pathext): - files = [cmd] - else: - files = [cmd + ext for ext in pathext] - else: - # On other platforms you don't have things like PATHEXT to tell you - # what file suffixes are executable, so just pass on cmd as-is. - files = [cmd] - - seen = set() - for dir in path: - normdir = os.path.normcase(dir) - if not normdir in seen: - seen.add(normdir) - for thefile in files: - name = os.path.join(dir, thefile) - if _access_check(name, mode): - return name - return None - - -# ZipFile is a context manager in 2.7, but not in 2.6 - -from zipfile import ZipFile as BaseZipFile - -if hasattr(BaseZipFile, '__enter__'): # pragma: no cover - ZipFile = BaseZipFile -else: # pragma: no cover - from zipfile import ZipExtFile as BaseZipExtFile - - class ZipExtFile(BaseZipExtFile): - def __init__(self, base): - self.__dict__.update(base.__dict__) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - # return None, so if an exception occurred, it will propagate - - class ZipFile(BaseZipFile): - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - # return None, so if an exception occurred, it will propagate - - def open(self, *args, **kwargs): - base = BaseZipFile.open(self, *args, **kwargs) - return ZipExtFile(base) - -try: - from platform import python_implementation -except ImportError: # pragma: no cover - def python_implementation(): - """Return a string identifying the Python implementation.""" - if 'PyPy' in sys.version: - return 'PyPy' - if os.name == 'java': - return 'Jython' - if sys.version.startswith('IronPython'): - return 'IronPython' - return 'CPython' - -import shutil -import sysconfig - -try: - callable = callable -except NameError: # pragma: no cover - from collections.abc import Callable - - def callable(obj): - return isinstance(obj, Callable) - - -try: - fsencode = os.fsencode - fsdecode = os.fsdecode -except AttributeError: # pragma: no cover - # Issue #99: on some systems (e.g. containerised), - # sys.getfilesystemencoding() returns None, and we need a real value, - # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and - # sys.getfilesystemencoding(): the return value is "the user’s preference - # according to the result of nl_langinfo(CODESET), or None if the - # nl_langinfo(CODESET) failed." 
- _fsencoding = sys.getfilesystemencoding() or 'utf-8' - if _fsencoding == 'mbcs': - _fserrors = 'strict' - else: - _fserrors = 'surrogateescape' - - def fsencode(filename): - if isinstance(filename, bytes): - return filename - elif isinstance(filename, text_type): - return filename.encode(_fsencoding, _fserrors) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) - - def fsdecode(filename): - if isinstance(filename, text_type): - return filename - elif isinstance(filename, bytes): - return filename.decode(_fsencoding, _fserrors) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) - -try: - from tokenize import detect_encoding -except ImportError: # pragma: no cover - from codecs import BOM_UTF8, lookup - import re - - cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") - - def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - - def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argument, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. - """ - try: - filename = readline.__self__.name - except AttributeError: - filename = None - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return b'' - - def find_cookie(line): - try: - # Decode as UTF-8. Either the line is an encoding declaration, - # in which case it should be pure ASCII, or it must be UTF-8 - # per default encoding. 
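For context, the stdlib tokenize.detect_encoding that this fallback mirrors is normally called with a binary file's readline; the path below is only a placeholder:

import tokenize

# Inspect at most the first two lines of a source file for a UTF-8 BOM or a
# PEP 263 encoding cookie, defaulting to 'utf-8'.
with open("some_module.py", "rb") as f:   # placeholder path
    encoding, first_lines = tokenize.detect_encoding(f.readline)
print(encoding, first_lines)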
- line_string = line.decode('utf-8') - except UnicodeDecodeError: - msg = "invalid or missing encoding declaration" - if filename is not None: - msg = '{} for {!r}'.format(msg, filename) - raise SyntaxError(msg) - - matches = cookie_re.findall(line_string) - if not matches: - return None - encoding = _get_normal_name(matches[0]) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - if filename is None: - msg = "unknown encoding: " + encoding - else: - msg = "unknown encoding for {!r}: {}".format(filename, - encoding) - raise SyntaxError(msg) - - if bom_found: - if codec.name != 'utf-8': - # This behaviour mimics the Python interpreter - if filename is None: - msg = 'encoding problem: utf-8' - else: - msg = 'encoding problem for {!r}: utf-8'.format(filename) - raise SyntaxError(msg) - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - -# For converting & <-> & etc. -try: - from html import escape -except ImportError: - from cgi import escape -if sys.version_info[:2] < (3, 4): - unescape = HTMLParser().unescape -else: - from html import unescape - -try: - from collections import ChainMap -except ImportError: # pragma: no cover - from collections import MutableMapping - - try: - from reprlib import recursive_repr as _recursive_repr - except ImportError: - def _recursive_repr(fillvalue='...'): - ''' - Decorator to make a repr function return fillvalue for a recursive - call - ''' - - def decorating_function(user_function): - repr_running = set() - - def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - # Can't use functools.wraps() here because of bootstrap issues - wrapper.__module__ = getattr(user_function, '__module__') - wrapper.__doc__ = getattr(user_function, '__doc__') - wrapper.__name__ = getattr(user_function, '__name__') - wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) - return wrapper - - return decorating_function - - class ChainMap(MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together - to create a single, updateable view. - - The underlying mappings are stored in a list. That list is public and can - accessed or updated using the *maps* attribute. There is no other state. - - Lookups search the underlying mappings successively until a key is found. - In contrast, writes, updates, and deletions only operate on the first - mapping. - - ''' - - def __init__(self, *maps): - '''Initialize a ChainMap by setting *maps* to the given mappings. - If no mappings are provided, a single empty dictionary is used. 
- - ''' - self.maps = list(maps) or [{}] # always at least one map - - def __missing__(self, key): - raise KeyError(key) - - def __getitem__(self, key): - for mapping in self.maps: - try: - return mapping[key] # can't use 'key in mapping' with defaultdict - except KeyError: - pass - return self.__missing__(key) # support subclasses that define __missing__ - - def get(self, key, default=None): - return self[key] if key in self else default - - def __len__(self): - return len(set().union(*self.maps)) # reuses stored hash values if possible - - def __iter__(self): - return iter(set().union(*self.maps)) - - def __contains__(self, key): - return any(key in m for m in self.maps) - - def __bool__(self): - return any(self.maps) - - @_recursive_repr() - def __repr__(self): - return '{0.__class__.__name__}({1})'.format( - self, ', '.join(map(repr, self.maps))) - - @classmethod - def fromkeys(cls, iterable, *args): - 'Create a ChainMap with a single dict created from the iterable.' - return cls(dict.fromkeys(iterable, *args)) - - def copy(self): - 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' - return self.__class__(self.maps[0].copy(), *self.maps[1:]) - - __copy__ = copy - - def new_child(self): # like Django's Context.push() - 'New ChainMap with a new dict followed by all previous maps.' - return self.__class__({}, *self.maps) - - @property - def parents(self): # like Django's Context.pop() - 'New ChainMap from maps[1:].' - return self.__class__(*self.maps[1:]) - - def __setitem__(self, key, value): - self.maps[0][key] = value - - def __delitem__(self, key): - try: - del self.maps[0][key] - except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) - - def popitem(self): - 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' - try: - return self.maps[0].popitem() - except KeyError: - raise KeyError('No keys found in the first mapping.') - - def pop(self, key, *args): - 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' - try: - return self.maps[0].pop(key, *args) - except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) - - def clear(self): - 'Clear maps[0], leaving maps[1:] intact.' - self.maps[0].clear() - -try: - from importlib.util import cache_from_source # Python >= 3.4 -except ImportError: # pragma: no cover - def cache_from_source(path, debug_override=None): - assert path.endswith('.py') - if debug_override is None: - debug_override = __debug__ - if debug_override: - suffix = 'c' - else: - suffix = 'o' - return path + suffix - -try: - from collections import OrderedDict -except ImportError: # pragma: no cover -## {{{ http://code.activestate.com/recipes/576693/ (r9) -# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. -# Passes Python2.7's test suite and incorporates all the latest updates. - try: - from thread import get_ident as _get_ident - except ImportError: - from dummy_thread import get_ident as _get_ident - - try: - from _abcoll import KeysView, ValuesView, ItemsView - except ImportError: - pass - - - class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as for regular dictionaries. 
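A short usage sketch of the ChainMap semantics implemented above (mirroring the stdlib collections.ChainMap): lookups fall through the chain of mappings, while writes and deletions only ever touch the first one.

from collections import ChainMap

defaults = {"color": "red", "user": "guest"}
overrides = {"user": "admin"}

cm = ChainMap(overrides, defaults)
assert cm["user"] == "admin"        # found in the first mapping
assert cm["color"] == "red"         # falls through to the second
cm["color"] = "blue"                # writes go to maps[0] only
assert overrides["color"] == "blue" and defaults["color"] == "red"

child = cm.new_child()              # push a fresh dict in front
assert child.parents["user"] == "admin"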
- - # The internal self.__map dictionary maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. - - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. Signature is the same as for - regular dictionaries, but keyword arguments are not recommended - because their insertion order is arbitrary. - - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link which goes at the end of the linked - # list, and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which is - # then removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, key = self.__map.pop(key) - link_prev[1] = link_next - link_next[0] = link_prev - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - root = self.__root - curr = root[1] - while curr is not root: - yield curr[2] - curr = curr[1] - - def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - root = self.__root - curr = root[0] - while curr is not root: - yield curr[2] - curr = curr[0] - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - try: - for node in self.__map.itervalues(): - del node[:] - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - except AttributeError: - pass - dict.clear(self) - - def popitem(self, last=True): - '''od.popitem() -> (k, v), return and remove a (key, value) pair. - Pairs are returned in LIFO order if last is true or FIFO order if false. - - ''' - if not self: - raise KeyError('dictionary is empty') - root = self.__root - if last: - link = root[0] - link_prev = link[0] - link_prev[1] = root - root[0] = link_prev - else: - link = root[1] - link_next = link[1] - root[1] = link_next - link_next[0] = root - key = link[2] - del self.__map[key] - value = dict.pop(self, key) - return key, value - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) items in od' - for k in self: - yield (k, self[k]) - - def update(*args, **kwds): - '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
- - If E is a dict instance, does: for k in E: od[k] = E[k] - If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] - Or if E is an iterable of items, does: for k, v in E: od[k] = v - In either case, this is followed by: for k, v in F.items(): od[k] = v - - ''' - if len(args) > 2: - raise TypeError('update() takes at most 2 positional ' - 'arguments (%d given)' % (len(args),)) - elif not args: - raise TypeError('update() takes at least 1 argument (0 given)') - self = args[0] - # Make progressively weaker assumptions about "other" - other = () - if len(args) == 2: - other = args[1] - if isinstance(other, dict): - for key in other: - self[key] = other[key] - elif hasattr(other, 'keys'): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. - If key is not found, d is returned if given, otherwise KeyError is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default - - def __repr__(self, _repr_running=None): - 'od.__repr__() <==> repr(od)' - if not _repr_running: _repr_running = {} - call_key = id(self), _get_ident() - if call_key in _repr_running: - return '...' - _repr_running[call_key] = 1 - try: - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - finally: - del _repr_running[call_key] - - def __reduce__(self): - 'Return state information for pickling' - items = [[k, self[k]] for k in self] - inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def copy(self): - 'od.copy() -> a shallow copy of od' - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S - and values equal to v (which defaults to None). - - ''' - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive - while comparison to a regular mapping is order-insensitive. 
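To make the order-sensitive equality documented in __eq__ above concrete (the stdlib collections.OrderedDict behaves the same way):

from collections import OrderedDict

a = OrderedDict([("x", 1), ("y", 2)])
b = OrderedDict([("y", 2), ("x", 1)])

assert a != b                              # OrderedDict vs OrderedDict: order matters
assert a == dict(b)                        # against a plain dict: order is ignored
assert a.popitem() == ("y", 2)             # LIFO by default
assert a.popitem(last=False) == ("x", 1)   # FIFO when last=False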
- - ''' - if isinstance(other, OrderedDict): - return len(self)==len(other) and self.items() == other.items() - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other - - # -- the following methods are only used in Python 2.7 -- - - def viewkeys(self): - "od.viewkeys() -> a set-like object providing a view on od's keys" - return KeysView(self) - - def viewvalues(self): - "od.viewvalues() -> an object providing a view on od's values" - return ValuesView(self) - - def viewitems(self): - "od.viewitems() -> a set-like object providing a view on od's items" - return ItemsView(self) - -try: - from logging.config import BaseConfigurator, valid_ident -except ImportError: # pragma: no cover - IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - - - def valid_ident(s): - m = IDENTIFIER.match(s) - if not m: - raise ValueError('Not a valid Python identifier: %r' % s) - return True - - - # The ConvertingXXX classes are wrappers around standard Python containers, - # and they serve to convert any suitable values in the container. The - # conversion converts base dicts, lists and tuples to their wrapped - # equivalents, whereas strings which match a conversion format are converted - # appropriately. - # - # Each wrapper should have a configurator attribute holding the actual - # configurator to use for conversion. - - class ConvertingDict(dict): - """A converting dictionary wrapper.""" - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def get(self, key, default=None): - value = dict.get(self, key, default) - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, key, default=None): - value = dict.pop(self, key, default) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - class ConvertingList(list): - """A converting list wrapper.""" - def __getitem__(self, key): - value = list.__getitem__(self, key) - result = self.configurator.convert(value) - #If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, idx=-1): - value = list.pop(self, idx) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - return result - - class ConvertingTuple(tuple): - """A converting tuple wrapper.""" - def __getitem__(self, key): - value = tuple.__getitem__(self, key) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - class BaseConfigurator(object): - """ - The configurator base class which defines some useful defaults. 
- """ - - CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') - - WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') - DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') - INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') - DIGIT_PATTERN = re.compile(r'^\d+$') - - value_converters = { - 'ext' : 'ext_convert', - 'cfg' : 'cfg_convert', - } - - # We might want to use a different one, e.g. importlib - importer = staticmethod(__import__) - - def __init__(self, config): - self.config = ConvertingDict(config) - self.config.configurator = self - - def resolve(self, s): - """ - Resolve strings to objects using standard import and attribute - syntax. - """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' + frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError: - e, tb = sys.exc_info()[1:] - v = ValueError('Cannot resolve %r: %s' % (s, e)) - v.__cause__, v.__traceback__ = e, tb - raise v - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - #print d, rest - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise ValueError('Unable to convert ' - '%r at %r' % (value, rest)) - #rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. 
- """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, string_types): - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if not callable(c): - c = self.resolve(c) - props = config.pop('.', None) - # Check for valid identifiers - kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) - result = c(**kwargs) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value diff --git a/spaces/plzdontcry/dakubettergpt/src/components/LanguageSelector/index.ts b/spaces/plzdontcry/dakubettergpt/src/components/LanguageSelector/index.ts deleted file mode 100644 index 30a89c4f00b8e0e011dde56b0009e25f9e05d4c0..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/LanguageSelector/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { default } from './LanguageSelector'; diff --git a/spaces/prabhu46/registerandlogin/README.md b/spaces/prabhu46/registerandlogin/README.md deleted file mode 100644 index 21479ffa840ba493cf3473af767f98cf204e8e1e..0000000000000000000000000000000000000000 --- a/spaces/prabhu46/registerandlogin/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Registerandlogin -emoji: 📚 -colorFrom: pink -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/prerna9811/Chord/portaudio/src/common/pa_ringbuffer.h b/spaces/prerna9811/Chord/portaudio/src/common/pa_ringbuffer.h deleted file mode 100644 index 400aaac659b33134378a316288979f27d25b929f..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/src/common/pa_ringbuffer.h +++ /dev/null @@ -1,236 +0,0 @@ -#ifndef PA_RINGBUFFER_H -#define PA_RINGBUFFER_H -/* - * $Id$ - * Portable Audio I/O Library - * Ring Buffer utility. - * - * Author: Phil Burk, http://www.softsynth.com - * modified for SMP safety on OS X by Bjorn Roche. - * also allowed for const where possible. - * modified for multiple-byte-sized data elements by Sven Fischer - * - * Note that this is safe only for a single-thread reader - * and a single-thread writer. - * - * This program is distributed with the PortAudio Portable Audio Library. 
- * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file - @ingroup common_src - @brief Single-reader single-writer lock-free ring buffer - - PaUtilRingBuffer is a ring buffer used to transport samples between - different execution contexts (threads, OS callbacks, interrupt handlers) - without requiring the use of any locks. This only works when there is - a single reader and a single writer (ie. one thread or callback writes - to the ring buffer, another thread or callback reads from it). - - The PaUtilRingBuffer structure manages a ring buffer containing N - elements, where N must be a power of two. An element may be any size - (specified in bytes). - - The memory area used to store the buffer elements must be allocated by - the client prior to calling PaUtil_InitializeRingBuffer() and must outlive - the use of the ring buffer. - - @note The ring buffer functions are not normally exposed in the PortAudio libraries. - If you want to call them then you will need to add pa_ringbuffer.c to your application source code. -*/ - -#if defined(__APPLE__) -#include -typedef int32_t ring_buffer_size_t; -#elif defined( __GNUC__ ) -typedef long ring_buffer_size_t; -#elif (_MSC_VER >= 1400) -typedef long ring_buffer_size_t; -#elif defined(_MSC_VER) || defined(__BORLANDC__) -typedef long ring_buffer_size_t; -#else -typedef long ring_buffer_size_t; -#endif - - - -#ifdef __cplusplus -extern "C" -{ -#endif /* __cplusplus */ - -typedef struct PaUtilRingBuffer -{ - ring_buffer_size_t bufferSize; /**< Number of elements in FIFO. Power of 2. Set by PaUtil_InitRingBuffer. */ - volatile ring_buffer_size_t writeIndex; /**< Index of next writable element. Set by PaUtil_AdvanceRingBufferWriteIndex. */ - volatile ring_buffer_size_t readIndex; /**< Index of next readable element. Set by PaUtil_AdvanceRingBufferReadIndex. 
*/ - ring_buffer_size_t bigMask; /**< Used for wrapping indices with extra bit to distinguish full/empty. */ - ring_buffer_size_t smallMask; /**< Used for fitting indices to buffer. */ - ring_buffer_size_t elementSizeBytes; /**< Number of bytes per element. */ - char *buffer; /**< Pointer to the buffer containing the actual data. */ -}PaUtilRingBuffer; - -/** Initialize Ring Buffer to empty state ready to have elements written to it. - - @param rbuf The ring buffer. - - @param elementSizeBytes The size of a single data element in bytes. - - @param elementCount The number of elements in the buffer (must be a power of 2). - - @param dataPtr A pointer to a previously allocated area where the data - will be maintained. It must be elementCount*elementSizeBytes long. - - @return -1 if elementCount is not a power of 2, otherwise 0. -*/ -ring_buffer_size_t PaUtil_InitializeRingBuffer( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementSizeBytes, ring_buffer_size_t elementCount, void *dataPtr ); - -/** Reset buffer to empty. Should only be called when buffer is NOT being read or written. - - @param rbuf The ring buffer. -*/ -void PaUtil_FlushRingBuffer( PaUtilRingBuffer *rbuf ); - -/** Retrieve the number of elements available in the ring buffer for writing. - - @param rbuf The ring buffer. - - @return The number of elements available for writing. -*/ -ring_buffer_size_t PaUtil_GetRingBufferWriteAvailable( const PaUtilRingBuffer *rbuf ); - -/** Retrieve the number of elements available in the ring buffer for reading. - - @param rbuf The ring buffer. - - @return The number of elements available for reading. -*/ -ring_buffer_size_t PaUtil_GetRingBufferReadAvailable( const PaUtilRingBuffer *rbuf ); - -/** Write data to the ring buffer. - - @param rbuf The ring buffer. - - @param data The address of new data to write to the buffer. - - @param elementCount The number of elements to be written. - - @return The number of elements written. -*/ -ring_buffer_size_t PaUtil_WriteRingBuffer( PaUtilRingBuffer *rbuf, const void *data, ring_buffer_size_t elementCount ); - -/** Read data from the ring buffer. - - @param rbuf The ring buffer. - - @param data The address where the data should be stored. - - @param elementCount The number of elements to be read. - - @return The number of elements read. -*/ -ring_buffer_size_t PaUtil_ReadRingBuffer( PaUtilRingBuffer *rbuf, void *data, ring_buffer_size_t elementCount ); - -/** Get address of region(s) to which we can write data. - - @param rbuf The ring buffer. - - @param elementCount The number of elements desired. - - @param dataPtr1 The address where the first (or only) region pointer will be - stored. - - @param sizePtr1 The address where the first (or only) region length will be - stored. - - @param dataPtr2 The address where the second region pointer will be stored if - the first region is too small to satisfy elementCount. - - @param sizePtr2 The address where the second region length will be stored if - the first region is too small to satisfy elementCount. - - @return The room available to be written or elementCount, whichever is smaller. -*/ -ring_buffer_size_t PaUtil_GetRingBufferWriteRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, - void **dataPtr1, ring_buffer_size_t *sizePtr1, - void **dataPtr2, ring_buffer_size_t *sizePtr2 ); - -/** Advance the write index to the next location to be written. - - @param rbuf The ring buffer. - - @param elementCount The number of elements to advance. - - @return The new position. 
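The bigMask/smallMask scheme described in the struct above can be illustrated with a toy single-producer/single-consumer buffer. This is only a Python sketch of the indexing idea (indices advance modulo 2N so that a full buffer is distinguishable from an empty one), not the PortAudio implementation, and it omits the atomic operations and memory barriers the real code relies on:

class ToyRingBuffer:
    def __init__(self, n):
        # n must be a power of two, as PaUtil_InitializeRingBuffer requires.
        assert n > 0 and n & (n - 1) == 0
        self.n = n
        self.big_mask = 2 * n - 1    # indices wrap at 2n (full != empty)
        self.small_mask = n - 1      # maps an index to a buffer slot
        self.buf = [None] * n
        self.read_i = 0
        self.write_i = 0

    def read_available(self):
        return (self.write_i - self.read_i) & self.big_mask

    def write_available(self):
        return self.n - self.read_available()

    def write(self, item):
        if self.write_available() == 0:
            return 0                 # buffer full, nothing written
        self.buf[self.write_i & self.small_mask] = item
        self.write_i = (self.write_i + 1) & self.big_mask
        return 1

    def read(self):
        if self.read_available() == 0:
            return None              # buffer empty
        item = self.buf[self.read_i & self.small_mask]
        self.read_i = (self.read_i + 1) & self.big_mask
        return item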
-*/ -ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ); - -/** Get address of region(s) from which we can read data. - - @param rbuf The ring buffer. - - @param elementCount The number of elements desired. - - @param dataPtr1 The address where the first (or only) region pointer will be - stored. - - @param sizePtr1 The address where the first (or only) region length will be - stored. - - @param dataPtr2 The address where the second region pointer will be stored if - the first region is too small to satisfy elementCount. - - @param sizePtr2 The address where the second region length will be stored if - the first region is too small to satisfy elementCount. - - @return The number of elements available for reading. -*/ -ring_buffer_size_t PaUtil_GetRingBufferReadRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, - void **dataPtr1, ring_buffer_size_t *sizePtr1, - void **dataPtr2, ring_buffer_size_t *sizePtr2 ); - -/** Advance the read index to the next location to be read. - - @param rbuf The ring buffer. - - @param elementCount The number of elements to advance. - - @return The new position. -*/ -ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* PA_RINGBUFFER_H */ diff --git a/spaces/prerna9811/Chord/portaudio/src/os/win/pa_win_waveformat.c b/spaces/prerna9811/Chord/portaudio/src/os/win/pa_win_waveformat.c deleted file mode 100644 index 0436a399ba564993204afd9bef29c11864cc8488..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/src/os/win/pa_win_waveformat.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * PortAudio Portable Real-Time Audio Library - * Windows WAVEFORMAT* data structure utilities - * portaudio.h should be included before this file. - * - * Copyright (c) 2007 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */ - -#include -#include -#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) - #include /* for WAVEFORMATEX */ -#endif - -#include "portaudio.h" -#include "pa_win_waveformat.h" - - -#if !defined(WAVE_FORMAT_EXTENSIBLE) -#define WAVE_FORMAT_EXTENSIBLE 0xFFFE -#endif - - -static GUID pawin_ksDataFormatSubtypeGuidBase = - { (USHORT)(WAVE_FORMAT_PCM), 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }; - - -int PaWin_SampleFormatToLinearWaveFormatTag( PaSampleFormat sampleFormat ) -{ - if( sampleFormat == paFloat32 ) - return PAWIN_WAVE_FORMAT_IEEE_FLOAT; - - return PAWIN_WAVE_FORMAT_PCM; -} - - -void PaWin_InitializeWaveFormatEx( PaWinWaveFormat *waveFormat, - int numChannels, PaSampleFormat sampleFormat, int waveFormatTag, double sampleRate ) -{ - WAVEFORMATEX *waveFormatEx = (WAVEFORMATEX*)waveFormat; - int bytesPerSample = Pa_GetSampleSize(sampleFormat); - unsigned long bytesPerFrame = numChannels * bytesPerSample; - - waveFormatEx->wFormatTag = waveFormatTag; - waveFormatEx->nChannels = (WORD)numChannels; - waveFormatEx->nSamplesPerSec = (DWORD)sampleRate; - waveFormatEx->nAvgBytesPerSec = waveFormatEx->nSamplesPerSec * bytesPerFrame; - waveFormatEx->nBlockAlign = (WORD)bytesPerFrame; - waveFormatEx->wBitsPerSample = bytesPerSample * 8; - waveFormatEx->cbSize = 0; -} - - -void PaWin_InitializeWaveFormatExtensible( PaWinWaveFormat *waveFormat, - int numChannels, PaSampleFormat sampleFormat, int waveFormatTag, double sampleRate, - PaWinWaveFormatChannelMask channelMask ) -{ - WAVEFORMATEX *waveFormatEx = (WAVEFORMATEX*)waveFormat; - int bytesPerSample = Pa_GetSampleSize(sampleFormat); - unsigned long bytesPerFrame = numChannels * bytesPerSample; - GUID guid; - - waveFormatEx->wFormatTag = WAVE_FORMAT_EXTENSIBLE; - waveFormatEx->nChannels = (WORD)numChannels; - waveFormatEx->nSamplesPerSec = (DWORD)sampleRate; - waveFormatEx->nAvgBytesPerSec = waveFormatEx->nSamplesPerSec * bytesPerFrame; - waveFormatEx->nBlockAlign = (WORD)bytesPerFrame; - waveFormatEx->wBitsPerSample = bytesPerSample * 8; - waveFormatEx->cbSize = 22; - - memcpy(&waveFormat->fields[PAWIN_INDEXOF_WVALIDBITSPERSAMPLE], - &waveFormatEx->wBitsPerSample, sizeof(WORD)); - - memcpy(&waveFormat->fields[PAWIN_INDEXOF_DWCHANNELMASK], - &channelMask, sizeof(DWORD)); - - guid = pawin_ksDataFormatSubtypeGuidBase; - guid.Data1 = (USHORT)waveFormatTag; - memcpy(&waveFormat->fields[PAWIN_INDEXOF_SUBFORMAT], &guid, sizeof(GUID)); -} - -PaWinWaveFormatChannelMask PaWin_DefaultChannelMask( int numChannels ) -{ - switch( numChannels ){ - case 1: - return PAWIN_SPEAKER_MONO; - case 2: - return PAWIN_SPEAKER_STEREO; - case 3: - return PAWIN_SPEAKER_FRONT_LEFT | PAWIN_SPEAKER_FRONT_CENTER | PAWIN_SPEAKER_FRONT_RIGHT; - case 4: - return PAWIN_SPEAKER_QUAD; - case 5: - return PAWIN_SPEAKER_QUAD | PAWIN_SPEAKER_FRONT_CENTER; - case 6: - /* The meaning of the PAWIN_SPEAKER_5POINT1 flag has changed over time: - http://msdn2.microsoft.com/en-us/library/aa474707.aspx - We use PAWIN_SPEAKER_5POINT1 (not PAWIN_SPEAKER_5POINT1_SURROUND) - because on some cards (eg Audigy) PAWIN_SPEAKER_5POINT1_SURROUND - results in a virtual mixdown placing the rear output in the - front _and_ rear speakers. - */ - return PAWIN_SPEAKER_5POINT1; - /* case 7: */ - case 8: - /* RoBi: PAWIN_SPEAKER_7POINT1_SURROUND fits normal surround sound setups better than PAWIN_SPEAKER_7POINT1, f.i. NVidia HDMI Audio - output is silent on channels 5&6 with NVidia drivers, and channel 7&8 with Microsoft HD Audio driver using PAWIN_SPEAKER_7POINT1. 
- With PAWIN_SPEAKER_7POINT1_SURROUND both setups work OK. */ - return PAWIN_SPEAKER_7POINT1_SURROUND; - } - - /* Apparently some Audigy drivers will output silence - if the direct-out constant (0) is used. So this is not ideal. - - RoBi 2012-12-19: Also, NVidia driver seem to output garbage instead. Again not very ideal. - */ - return PAWIN_SPEAKER_DIRECTOUT; - - /* Note that Alec Rogers proposed the following as an alternate method to - generate the default channel mask, however it doesn't seem to be an improvement - over the above, since some drivers will matrix outputs mapping to non-present - speakers across multiple physical speakers. - - if(nChannels==1) { - pwfFormat->dwChannelMask = SPEAKER_FRONT_CENTER; - } - else { - pwfFormat->dwChannelMask = 0; - for(i=0; idwChannelMask = (pwfFormat->dwChannelMask << 1) | 0x1; - } - */ -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/BdfFontFile.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/BdfFontFile.py deleted file mode 100644 index 161954831aee7e443d0fd83248ec722522299ac4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/BdfFontFile.py +++ /dev/null @@ -1,122 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# bitmap distribution font (bdf) file parser -# -# history: -# 1996-05-16 fl created (as bdf2pil) -# 1997-08-25 fl converted to FontFile driver -# 2001-05-25 fl removed bogus __init__ call -# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev) -# 2003-04-22 fl more robustification (from Graham Dumpleton) -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1997-2003 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - -""" -Parse X Bitmap Distribution Format (BDF) -""" - - -from . import FontFile, Image - -bdf_slant = { - "R": "Roman", - "I": "Italic", - "O": "Oblique", - "RI": "Reverse Italic", - "RO": "Reverse Oblique", - "OT": "Other", -} - -bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"} - - -def bdf_char(f): - # skip to STARTCHAR - while True: - s = f.readline() - if not s: - return None - if s[:9] == b"STARTCHAR": - break - id = s[9:].strip().decode("ascii") - - # load symbol properties - props = {} - while True: - s = f.readline() - if not s or s[:6] == b"BITMAP": - break - i = s.find(b" ") - props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") - - # load bitmap - bitmap = [] - while True: - s = f.readline() - if not s or s[:7] == b"ENDCHAR": - break - bitmap.append(s[:-1]) - bitmap = b"".join(bitmap) - - # The word BBX - # followed by the width in x (BBw), height in y (BBh), - # and x and y displacement (BBxoff0, BByoff0) - # of the lower left corner from the origin of the character. - width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split()) - - # The word DWIDTH - # followed by the width in x and y of the character in device pixels. 
- dwx, dwy = (int(p) for p in props["DWIDTH"].split()) - - bbox = ( - (dwx, dwy), - (x_disp, -y_disp - height, width + x_disp, -y_disp), - (0, 0, width, height), - ) - - try: - im = Image.frombytes("1", (width, height), bitmap, "hex", "1") - except ValueError: - # deal with zero-width characters - im = Image.new("1", (width, height)) - - return id, int(props["ENCODING"]), bbox, im - - -class BdfFontFile(FontFile.FontFile): - """Font file plugin for the X11 BDF format.""" - - def __init__(self, fp): - super().__init__() - - s = fp.readline() - if s[:13] != b"STARTFONT 2.1": - msg = "not a valid BDF file" - raise SyntaxError(msg) - - props = {} - comments = [] - - while True: - s = fp.readline() - if not s or s[:13] == b"ENDPROPERTIES": - break - i = s.find(b" ") - props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") - if s[:i] in [b"COMMENT", b"COPYRIGHT"]: - if s.find(b"LogicalFontDescription") < 0: - comments.append(s[i + 1 : -1].decode("ascii")) - - while True: - c = bdf_char(fp) - if not c: - break - id, ch, (xy, dst, src), im = c - if 0 <= ch < len(self.glyph): - self.glyph[ch] = xy, dst, src, im diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/PcxImagePlugin.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/PcxImagePlugin.py deleted file mode 100644 index 854d9e83ee7f2b19a2884b4a5556d6adc5556ae4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/PcxImagePlugin.py +++ /dev/null @@ -1,221 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PCX file handling -# -# This format was originally used by ZSoft's popular PaintBrush -# program for the IBM PC. It is also supported by many MS-DOS and -# Windows applications, including the Windows PaintBrush program in -# Windows 3. -# -# history: -# 1995-09-01 fl Created -# 1996-05-20 fl Fixed RGB support -# 1997-01-03 fl Fixed 2-bit and 4-bit support -# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1) -# 1999-02-07 fl Added write support -# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust -# 2002-07-30 fl Seek from to current position, not beginning of file -# 2003-06-03 fl Extract DPI settings (info["dpi"]) -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-2003 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - -import io -import logging - -from . import Image, ImageFile, ImagePalette -from ._binary import i16le as i16 -from ._binary import o8 -from ._binary import o16le as o16 - -logger = logging.getLogger(__name__) - - -def _accept(prefix): - return prefix[0] == 10 and prefix[1] in [0, 2, 3, 5] - - -## -# Image plugin for Paintbrush images. 
- - -class PcxImageFile(ImageFile.ImageFile): - format = "PCX" - format_description = "Paintbrush" - - def _open(self): - # header - s = self.fp.read(128) - if not _accept(s): - msg = "not a PCX file" - raise SyntaxError(msg) - - # image - bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1 - if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: - msg = "bad PCX image size" - raise SyntaxError(msg) - logger.debug("BBox: %s %s %s %s", *bbox) - - # format - version = s[1] - bits = s[3] - planes = s[65] - provided_stride = i16(s, 66) - logger.debug( - "PCX version %s, bits %s, planes %s, stride %s", - version, - bits, - planes, - provided_stride, - ) - - self.info["dpi"] = i16(s, 12), i16(s, 14) - - if bits == 1 and planes == 1: - mode = rawmode = "1" - - elif bits == 1 and planes in (2, 4): - mode = "P" - rawmode = "P;%dL" % planes - self.palette = ImagePalette.raw("RGB", s[16:64]) - - elif version == 5 and bits == 8 and planes == 1: - mode = rawmode = "L" - # FIXME: hey, this doesn't work with the incremental loader !!! - self.fp.seek(-769, io.SEEK_END) - s = self.fp.read(769) - if len(s) == 769 and s[0] == 12: - # check if the palette is linear greyscale - for i in range(256): - if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3: - mode = rawmode = "P" - break - if mode == "P": - self.palette = ImagePalette.raw("RGB", s[1:]) - self.fp.seek(128) - - elif version == 5 and bits == 8 and planes == 3: - mode = "RGB" - rawmode = "RGB;L" - - else: - msg = "unknown PCX mode" - raise OSError(msg) - - self._mode = mode - self._size = bbox[2] - bbox[0], bbox[3] - bbox[1] - - # Don't trust the passed in stride. - # Calculate the approximate position for ourselves. - # CVE-2020-35653 - stride = (self._size[0] * bits + 7) // 8 - - # While the specification states that this must be even, - # not all images follow this - if provided_stride != stride: - stride += stride % 2 - - bbox = (0, 0) + self.size - logger.debug("size: %sx%s", *self.size) - - self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))] - - -# -------------------------------------------------------------------- -# save PCX files - - -SAVE = { - # mode: (version, bits, planes, raw mode) - "1": (2, 1, 1, "1"), - "L": (5, 8, 1, "L"), - "P": (5, 8, 1, "P"), - "RGB": (5, 8, 3, "RGB;L"), -} - - -def _save(im, fp, filename): - try: - version, bits, planes, rawmode = SAVE[im.mode] - except KeyError as e: - msg = f"Cannot save {im.mode} images as PCX" - raise ValueError(msg) from e - - # bytes per plane - stride = (im.size[0] * bits + 7) // 8 - # stride should be even - stride += stride % 2 - # Stride needs to be kept in sync with the PcxEncode.c version. - # Ideally it should be passed in in the state, but the bytes value - # gets overwritten. - - logger.debug( - "PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", - im.size[0], - bits, - stride, - ) - - # under windows, we could determine the current screen size with - # "Image.core.display_mode()[1]", but I think that's overkill... 
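In practice this plugin is exercised through the ordinary Pillow save/open round trip; a small sketch (the file name is arbitrary):

from PIL import Image

im = Image.new("P", (32, 32))       # paletted image, saved as PCX version 5
im.save("example.pcx")              # dispatched to the _save function here

with Image.open("example.pcx") as reloaded:
    assert reloaded.format == "PCX"
    assert reloaded.size == (32, 32)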
- - screen = im.size - - dpi = 100, 100 - - # PCX header - fp.write( - o8(10) - + o8(version) - + o8(1) - + o8(bits) - + o16(0) - + o16(0) - + o16(im.size[0] - 1) - + o16(im.size[1] - 1) - + o16(dpi[0]) - + o16(dpi[1]) - + b"\0" * 24 - + b"\xFF" * 24 - + b"\0" - + o8(planes) - + o16(stride) - + o16(1) - + o16(screen[0]) - + o16(screen[1]) - + b"\0" * 54 - ) - - assert fp.tell() == 128 - - ImageFile._save(im, fp, [("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))]) - - if im.mode == "P": - # colour palette - fp.write(o8(12)) - palette = im.im.getpalette("RGB", "RGB") - palette += b"\x00" * (768 - len(palette)) - fp.write(palette) # 768 bytes - elif im.mode == "L": - # greyscale palette - fp.write(o8(12)) - for i in range(256): - fp.write(o8(i) * 3) - - -# -------------------------------------------------------------------- -# registry - - -Image.register_open(PcxImageFile.format, PcxImageFile, _accept) -Image.register_save(PcxImageFile.format, _save) - -Image.register_extension(PcxImageFile.format, ".pcx") - -Image.register_mime(PcxImageFile.format, "image/x-pcx") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/env.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/env.py deleted file mode 100644 index 26d0d7fb151125703d5a0c84fa5d78d68f1eb8d8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/env.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains command to print information about the environment. - -Usage: - huggingface-cli env -""" -from argparse import _SubParsersAction - -from ..utils import dump_environment_info -from . 
import BaseHuggingfaceCLICommand - - -class EnvironmentCommand(BaseHuggingfaceCLICommand): - def __init__(self, args): - self.args = args - - @staticmethod - def register_subcommand(parser: _SubParsersAction): - env_parser = parser.add_parser("env", help="Print information about the environment.") - env_parser.set_defaults(func=EnvironmentCommand) - - def run(self) -> None: - dump_environment_info() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/common/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/common/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/axes_grid.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/axes_grid.py deleted file mode 100644 index ecb3e9d92c18d44dacb75dba0b85f019e0dba7a2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/axes_grid.py +++ /dev/null @@ -1,23 +0,0 @@ -from matplotlib import _api - -import mpl_toolkits.axes_grid1.axes_grid as axes_grid_orig -from .axislines import Axes - - -_api.warn_deprecated( - "3.8", name=__name__, obj_type="module", alternative="axes_grid1.axes_grid") - - -@_api.deprecated("3.8", alternative=( - "axes_grid1.axes_grid.Grid(..., axes_class=axislines.Axes")) -class Grid(axes_grid_orig.Grid): - _defaultAxesClass = Axes - - -@_api.deprecated("3.8", alternative=( - "axes_grid1.axes_grid.ImageGrid(..., axes_class=axislines.Axes")) -class ImageGrid(axes_grid_orig.ImageGrid): - _defaultAxesClass = Axes - - -AxesGrid = ImageGrid diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h deleted file mode 100644 index 69a0374dd8e997e5096d89db42977be2557175a1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * This API is only provided because it is part of publicly exported - * headers. Its use is considered DEPRECATED, and it will be removed - * eventually. - * (This includes the _PyArray_SigintHandler and _PyArray_GetSigintBuf - * functions which are however, public API, and not headers.) - * - * Instead of using these non-threadsafe macros consider periodically - * querying `PyErr_CheckSignals()` or `PyOS_InterruptOccurred()` will work. - * Both of these require holding the GIL, although cpython could add a - * version of `PyOS_InterruptOccurred()` which does not. Such a version - * actually exists as private API in Python 3.10, and backported to 3.9 and 3.8, - * see also https://bugs.python.org/issue41037 and - * https://github.com/python/cpython/pull/20599). 
- */ - -#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ -#define NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ - -#ifndef NPY_NO_SIGNAL - -#include -#include - -#ifndef sigsetjmp - -#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) -#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) -#define NPY_SIGJMP_BUF jmp_buf - -#else - -#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) -#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) -#define NPY_SIGJMP_BUF sigjmp_buf - -#endif - -# define NPY_SIGINT_ON { \ - PyOS_sighandler_t _npy_sig_save; \ - _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ - if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ - 1) == 0) { \ - -# define NPY_SIGINT_OFF } \ - PyOS_setsig(SIGINT, _npy_sig_save); \ - } - -#else /* NPY_NO_SIGNAL */ - -#define NPY_SIGINT_ON -#define NPY_SIGINT_OFF - -#endif /* HAVE_SIGSETJMP */ - -#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ */ diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/api/test_api.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/api/test_api.py deleted file mode 100644 index 60bcb97aaa3642be064bcacd130edf2084c4a55c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/api/test_api.py +++ /dev/null @@ -1,383 +0,0 @@ -from __future__ import annotations - -import pytest - -import pandas as pd -from pandas import api -import pandas._testing as tm -from pandas.api import ( - extensions as api_extensions, - indexers as api_indexers, - interchange as api_interchange, - types as api_types, - typing as api_typing, -) - - -class Base: - def check(self, namespace, expected, ignored=None): - # see which names are in the namespace, minus optional - # ignored ones - # compare vs the expected - - result = sorted( - f for f in dir(namespace) if not f.startswith("__") and f != "annotations" - ) - if ignored is not None: - result = sorted(set(result) - set(ignored)) - - expected = sorted(expected) - tm.assert_almost_equal(result, expected) - - -class TestPDApi(Base): - # these are optionally imported based on testing - # & need to be ignored - ignored = ["tests", "locale", "conftest", "_version_meson"] - - # top-level sub-packages - public_lib = [ - "api", - "arrays", - "options", - "test", - "testing", - "errors", - "plotting", - "io", - "tseries", - ] - private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"] - - # misc - misc = ["IndexSlice", "NaT", "NA"] - - # top-level classes - classes = [ - "ArrowDtype", - "Categorical", - "CategoricalIndex", - "DataFrame", - "DateOffset", - "DatetimeIndex", - "ExcelFile", - "ExcelWriter", - "Flags", - "Grouper", - "HDFStore", - "Index", - "MultiIndex", - "Period", - "PeriodIndex", - "RangeIndex", - "Series", - "SparseDtype", - "StringDtype", - "Timedelta", - "TimedeltaIndex", - "Timestamp", - "Interval", - "IntervalIndex", - "CategoricalDtype", - "PeriodDtype", - "IntervalDtype", - "DatetimeTZDtype", - "BooleanDtype", - "Int8Dtype", - "Int16Dtype", - "Int32Dtype", - "Int64Dtype", - "UInt8Dtype", - "UInt16Dtype", - "UInt32Dtype", - "UInt64Dtype", - "Float32Dtype", - "Float64Dtype", - "NamedAgg", - ] - - # these are already deprecated; awaiting removal - deprecated_classes: list[str] = [] - - # external modules exposed in pandas namespace - modules: list[str] = [] - - # top-level functions - funcs = [ - "array", - "bdate_range", - "concat", - "crosstab", - "cut", - "date_range", - "interval_range", - 
"eval", - "factorize", - "get_dummies", - "from_dummies", - "infer_freq", - "isna", - "isnull", - "lreshape", - "melt", - "notna", - "notnull", - "offsets", - "merge", - "merge_ordered", - "merge_asof", - "period_range", - "pivot", - "pivot_table", - "qcut", - "show_versions", - "timedelta_range", - "unique", - "value_counts", - "wide_to_long", - ] - - # top-level option funcs - funcs_option = [ - "reset_option", - "describe_option", - "get_option", - "option_context", - "set_option", - "set_eng_float_format", - ] - - # top-level read_* funcs - funcs_read = [ - "read_clipboard", - "read_csv", - "read_excel", - "read_fwf", - "read_gbq", - "read_hdf", - "read_html", - "read_xml", - "read_json", - "read_pickle", - "read_sas", - "read_sql", - "read_sql_query", - "read_sql_table", - "read_stata", - "read_table", - "read_feather", - "read_parquet", - "read_orc", - "read_spss", - ] - - # top-level json funcs - funcs_json = ["json_normalize"] - - # top-level to_* funcs - funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"] - - # top-level to deprecate in the future - deprecated_funcs_in_future: list[str] = [] - - # these are already deprecated; awaiting removal - deprecated_funcs: list[str] = [] - - # private modules in pandas namespace - private_modules = [ - "_config", - "_libs", - "_is_numpy_dev", - "_pandas_datetime_CAPI", - "_pandas_parser_CAPI", - "_testing", - "_typing", - ] - if not pd._built_with_meson: - private_modules.append("_version") - - def test_api(self): - checkthese = ( - self.public_lib - + self.private_lib - + self.misc - + self.modules - + self.classes - + self.funcs - + self.funcs_option - + self.funcs_read - + self.funcs_json - + self.funcs_to - + self.private_modules - ) - self.check(namespace=pd, expected=checkthese, ignored=self.ignored) - - def test_api_all(self): - expected = set( - self.public_lib - + self.misc - + self.modules - + self.classes - + self.funcs - + self.funcs_option - + self.funcs_read - + self.funcs_json - + self.funcs_to - ) - set(self.deprecated_classes) - actual = set(pd.__all__) - - extraneous = actual - expected - assert not extraneous - - missing = expected - actual - assert not missing - - def test_depr(self): - deprecated_list = ( - self.deprecated_classes - + self.deprecated_funcs - + self.deprecated_funcs_in_future - ) - for depr in deprecated_list: - with tm.assert_produces_warning(FutureWarning): - _ = getattr(pd, depr) - - -class TestApi(Base): - allowed_api_dirs = [ - "types", - "extensions", - "indexers", - "interchange", - "typing", - ] - allowed_typing = [ - "DataFrameGroupBy", - "DatetimeIndexResamplerGroupby", - "Expanding", - "ExpandingGroupby", - "ExponentialMovingWindow", - "ExponentialMovingWindowGroupby", - "JsonReader", - "NaTType", - "NAType", - "PeriodIndexResamplerGroupby", - "Resampler", - "Rolling", - "RollingGroupby", - "SeriesGroupBy", - "StataReader", - "TimedeltaIndexResamplerGroupby", - "TimeGrouper", - "Window", - ] - allowed_api_types = [ - "is_any_real_numeric_dtype", - "is_array_like", - "is_bool", - "is_bool_dtype", - "is_categorical_dtype", - "is_complex", - "is_complex_dtype", - "is_datetime64_any_dtype", - "is_datetime64_dtype", - "is_datetime64_ns_dtype", - "is_datetime64tz_dtype", - "is_dict_like", - "is_dtype_equal", - "is_extension_array_dtype", - "is_file_like", - "is_float", - "is_float_dtype", - "is_hashable", - "is_int64_dtype", - "is_integer", - "is_integer_dtype", - "is_interval", - "is_interval_dtype", - "is_iterator", - "is_list_like", - "is_named_tuple", - "is_number", - 
"is_numeric_dtype", - "is_object_dtype", - "is_period_dtype", - "is_re", - "is_re_compilable", - "is_scalar", - "is_signed_integer_dtype", - "is_sparse", - "is_string_dtype", - "is_timedelta64_dtype", - "is_timedelta64_ns_dtype", - "is_unsigned_integer_dtype", - "pandas_dtype", - "infer_dtype", - "union_categoricals", - "CategoricalDtype", - "DatetimeTZDtype", - "IntervalDtype", - "PeriodDtype", - ] - allowed_api_interchange = ["from_dataframe", "DataFrame"] - allowed_api_indexers = [ - "check_array_indexer", - "BaseIndexer", - "FixedForwardWindowIndexer", - "VariableOffsetWindowIndexer", - ] - allowed_api_extensions = [ - "no_default", - "ExtensionDtype", - "register_extension_dtype", - "register_dataframe_accessor", - "register_index_accessor", - "register_series_accessor", - "take", - "ExtensionArray", - "ExtensionScalarOpsMixin", - ] - - def test_api(self): - self.check(api, self.allowed_api_dirs) - - def test_api_typing(self): - self.check(api_typing, self.allowed_typing) - - def test_api_types(self): - self.check(api_types, self.allowed_api_types) - - def test_api_interchange(self): - self.check(api_interchange, self.allowed_api_interchange) - - def test_api_indexers(self): - self.check(api_indexers, self.allowed_api_indexers) - - def test_api_extensions(self): - self.check(api_extensions, self.allowed_api_extensions) - - -class TestTesting(Base): - funcs = [ - "assert_frame_equal", - "assert_series_equal", - "assert_index_equal", - "assert_extension_array_equal", - ] - - def test_testing(self): - from pandas import testing - - self.check(testing, self.funcs) - - def test_util_in_top_level(self): - with pytest.raises(AttributeError, match="foo"): - pd.util.foo - - -def test_pandas_array_alias(): - msg = "PandasArray has been renamed NumpyExtensionArray" - with tm.assert_produces_warning(FutureWarning, match=msg): - res = pd.arrays.PandasArray - - assert res is pd.arrays.NumpyExtensionArray diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.py deleted file mode 100644 index c2c53fbc4637ed60dc92914f6e2ca74d5e0bdfe9..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.py +++ /dev/null @@ -1,349 +0,0 @@ -import re -import sys - -import numpy as np -import pytest - -from pandas.compat import PYPY - -from pandas import ( - Categorical, - CategoricalDtype, - DataFrame, - Index, - NaT, - Series, - date_range, -) -import pandas._testing as tm -from pandas.api.types import is_scalar - - -class TestCategoricalAnalytics: - @pytest.mark.parametrize("aggregation", ["min", "max"]) - def test_min_max_not_ordered_raises(self, aggregation): - # unordered cats have no min/max - cat = Categorical(["a", "b", "c", "d"], ordered=False) - msg = f"Categorical is not ordered for operation {aggregation}" - agg_func = getattr(cat, aggregation) - - with pytest.raises(TypeError, match=msg): - agg_func() - - ufunc = np.minimum if aggregation == "min" else np.maximum - with pytest.raises(TypeError, match=msg): - ufunc.reduce(cat) - - def test_min_max_ordered(self, index_or_series_or_array): - cat = Categorical(["a", "b", "c", "d"], ordered=True) - obj = index_or_series_or_array(cat) - _min = obj.min() - _max = obj.max() - assert _min == "a" - assert _max == "d" - - assert np.minimum.reduce(obj) == "a" - assert 
np.maximum.reduce(obj) == "d" - # TODO: raises if we pass axis=0 (on Index and Categorical, not Series) - - cat = Categorical( - ["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True - ) - obj = index_or_series_or_array(cat) - _min = obj.min() - _max = obj.max() - assert _min == "d" - assert _max == "a" - assert np.minimum.reduce(obj) == "d" - assert np.maximum.reduce(obj) == "a" - - def test_min_max_reduce(self): - # GH52788 - cat = Categorical(["a", "b", "c", "d"], ordered=True) - df = DataFrame(cat) - - result_max = df.agg("max") - expected_max = Series(Categorical(["d"], dtype=cat.dtype)) - tm.assert_series_equal(result_max, expected_max) - - result_min = df.agg("min") - expected_min = Series(Categorical(["a"], dtype=cat.dtype)) - tm.assert_series_equal(result_min, expected_min) - - @pytest.mark.parametrize( - "categories,expected", - [ - (list("ABC"), np.nan), - ([1, 2, 3], np.nan), - pytest.param( - Series(date_range("2020-01-01", periods=3), dtype="category"), - NaT, - marks=pytest.mark.xfail( - reason="https://github.com/pandas-dev/pandas/issues/29962" - ), - ), - ], - ) - @pytest.mark.parametrize("aggregation", ["min", "max"]) - def test_min_max_ordered_empty(self, categories, expected, aggregation): - # GH 30227 - cat = Categorical([], categories=categories, ordered=True) - - agg_func = getattr(cat, aggregation) - result = agg_func() - assert result is expected - - @pytest.mark.parametrize( - "values, categories", - [(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])], - ) - @pytest.mark.parametrize("skipna", [True, False]) - @pytest.mark.parametrize("function", ["min", "max"]) - def test_min_max_with_nan(self, values, categories, function, skipna): - # GH 25303 - cat = Categorical(values, categories=categories, ordered=True) - result = getattr(cat, function)(skipna=skipna) - - if skipna is False: - assert result is np.nan - else: - expected = categories[0] if function == "min" else categories[2] - assert result == expected - - @pytest.mark.parametrize("function", ["min", "max"]) - @pytest.mark.parametrize("skipna", [True, False]) - def test_min_max_only_nan(self, function, skipna): - # https://github.com/pandas-dev/pandas/issues/33450 - cat = Categorical([np.nan], categories=[1, 2], ordered=True) - result = getattr(cat, function)(skipna=skipna) - assert result is np.nan - - @pytest.mark.parametrize("method", ["min", "max"]) - def test_numeric_only_min_max_raises(self, method): - # GH 25303 - cat = Categorical( - [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True - ) - with pytest.raises(TypeError, match=".* got an unexpected keyword"): - getattr(cat, method)(numeric_only=True) - - @pytest.mark.parametrize("method", ["min", "max"]) - def test_numpy_min_max_raises(self, method): - cat = Categorical(["a", "b", "c", "b"], ordered=False) - msg = ( - f"Categorical is not ordered for operation {method}\n" - "you can use .as_ordered() to change the Categorical to an ordered one" - ) - method = getattr(np, method) - with pytest.raises(TypeError, match=re.escape(msg)): - method(cat) - - @pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"]) - @pytest.mark.parametrize("method", ["min", "max"]) - def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg): - cat = Categorical(["a", "b", "c", "b"], ordered=True) - msg = ( - f"the '{kwarg}' parameter is not supported in the pandas implementation " - f"of {method}" - ) - if kwarg == "axis": - msg = r"`axis` must be fewer than the number of dimensions \(1\)" - kwargs = {kwarg: 
42} - method = getattr(np, method) - with pytest.raises(ValueError, match=msg): - method(cat, **kwargs) - - @pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")]) - def test_numpy_min_max_axis_equals_none(self, method, expected): - cat = Categorical(["a", "b", "c", "b"], ordered=True) - method = getattr(np, method) - result = method(cat, axis=None) - assert result == expected - - @pytest.mark.parametrize( - "values,categories,exp_mode", - [ - ([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]), - ([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]), - ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]), - ([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]), - ([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), - ([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), - ], - ) - def test_mode(self, values, categories, exp_mode): - cat = Categorical(values, categories=categories, ordered=True) - res = Series(cat).mode()._values - exp = Categorical(exp_mode, categories=categories, ordered=True) - tm.assert_categorical_equal(res, exp) - - def test_searchsorted(self, ordered): - # https://github.com/pandas-dev/pandas/issues/8420 - # https://github.com/pandas-dev/pandas/issues/14522 - - cat = Categorical( - ["cheese", "milk", "apple", "bread", "bread"], - categories=["cheese", "milk", "apple", "bread"], - ordered=ordered, - ) - ser = Series(cat) - - # Searching for single item argument, side='left' (default) - res_cat = cat.searchsorted("apple") - assert res_cat == 2 - assert is_scalar(res_cat) - - res_ser = ser.searchsorted("apple") - assert res_ser == 2 - assert is_scalar(res_ser) - - # Searching for single item array, side='left' (default) - res_cat = cat.searchsorted(["bread"]) - res_ser = ser.searchsorted(["bread"]) - exp = np.array([3], dtype=np.intp) - tm.assert_numpy_array_equal(res_cat, exp) - tm.assert_numpy_array_equal(res_ser, exp) - - # Searching for several items array, side='right' - res_cat = cat.searchsorted(["apple", "bread"], side="right") - res_ser = ser.searchsorted(["apple", "bread"], side="right") - exp = np.array([3, 5], dtype=np.intp) - tm.assert_numpy_array_equal(res_cat, exp) - tm.assert_numpy_array_equal(res_ser, exp) - - # Searching for a single value that is not from the Categorical - with pytest.raises(TypeError, match="cucumber"): - cat.searchsorted("cucumber") - with pytest.raises(TypeError, match="cucumber"): - ser.searchsorted("cucumber") - - # Searching for multiple values one of each is not from the Categorical - msg = ( - "Cannot setitem on a Categorical with a new category, " - "set the categories first" - ) - with pytest.raises(TypeError, match=msg): - cat.searchsorted(["bread", "cucumber"]) - with pytest.raises(TypeError, match=msg): - ser.searchsorted(["bread", "cucumber"]) - - def test_unique(self, ordered): - # GH38140 - dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered) - - # categories are reordered based on value when ordered=False - cat = Categorical(["a", "b", "c"], dtype=dtype) - res = cat.unique() - tm.assert_categorical_equal(res, cat) - - cat = Categorical(["a", "b", "a", "a"], dtype=dtype) - res = cat.unique() - tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype)) - - cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype) - res = cat.unique() - exp_cat = Categorical(["c", "a", "b"], dtype=dtype) - tm.assert_categorical_equal(res, exp_cat) - - # nan must be removed - cat = Categorical(["b", np.nan, "b", np.nan, "a"], dtype=dtype) - res = cat.unique() - exp_cat = Categorical(["b", np.nan, "a"], 
dtype=dtype) - tm.assert_categorical_equal(res, exp_cat) - - def test_unique_index_series(self, ordered): - # GH38140 - dtype = CategoricalDtype([3, 2, 1], ordered=ordered) - - c = Categorical([3, 1, 2, 2, 1], dtype=dtype) - # Categorical.unique sorts categories by appearance order - # if ordered=False - exp = Categorical([3, 1, 2], dtype=dtype) - tm.assert_categorical_equal(c.unique(), exp) - - tm.assert_index_equal(Index(c).unique(), Index(exp)) - tm.assert_categorical_equal(Series(c).unique(), exp) - - c = Categorical([1, 1, 2, 2], dtype=dtype) - exp = Categorical([1, 2], dtype=dtype) - tm.assert_categorical_equal(c.unique(), exp) - tm.assert_index_equal(Index(c).unique(), Index(exp)) - tm.assert_categorical_equal(Series(c).unique(), exp) - - def test_shift(self): - # GH 9416 - cat = Categorical(["a", "b", "c", "d", "a"]) - - # shift forward - sp1 = cat.shift(1) - xp1 = Categorical([np.nan, "a", "b", "c", "d"]) - tm.assert_categorical_equal(sp1, xp1) - tm.assert_categorical_equal(cat[:-1], sp1[1:]) - - # shift back - sn2 = cat.shift(-2) - xp2 = Categorical( - ["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"] - ) - tm.assert_categorical_equal(sn2, xp2) - tm.assert_categorical_equal(cat[2:], sn2[:-2]) - - # shift by zero - tm.assert_categorical_equal(cat, cat.shift(0)) - - def test_nbytes(self): - cat = Categorical([1, 2, 3]) - exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories - assert cat.nbytes == exp - - def test_memory_usage(self): - cat = Categorical([1, 2, 3]) - - # .categories is an index, so we include the hashtable - assert 0 < cat.nbytes <= cat.memory_usage() - assert 0 < cat.nbytes <= cat.memory_usage(deep=True) - - cat = Categorical(["foo", "foo", "bar"]) - assert cat.memory_usage(deep=True) > cat.nbytes - - if not PYPY: - # sys.getsizeof will call the .memory_usage with - # deep=True, and add on some GC overhead - diff = cat.memory_usage(deep=True) - sys.getsizeof(cat) - assert abs(diff) < 100 - - def test_map(self): - c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True) - result = c.map(lambda x: x.lower(), na_action=None) - exp = Categorical(list("ababc"), categories=list("cba"), ordered=True) - tm.assert_categorical_equal(result, exp) - - c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False) - result = c.map(lambda x: x.lower(), na_action=None) - exp = Categorical(list("ababc"), categories=list("abc"), ordered=False) - tm.assert_categorical_equal(result, exp) - - result = c.map(lambda x: 1, na_action=None) - # GH 12766: Return an index not an array - tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64))) - - @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) - def test_validate_inplace_raises(self, value): - cat = Categorical(["A", "B", "B", "C", "A"]) - msg = ( - 'For argument "inplace" expected type bool, ' - f"received type {type(value).__name__}" - ) - - with pytest.raises(ValueError, match=msg): - cat.sort_values(inplace=value) - - def test_quantile_empty(self): - # make sure we have correct itemsize on resulting codes - cat = Categorical(["A", "B"]) - idx = Index([0.0, 0.5]) - result = cat[:0]._quantile(idx, interpolation="linear") - assert result._codes.dtype == np.int8 - - expected = cat.take([-1, -1], allow_fill=True) - tm.assert_extension_array_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/copy_view/index/__init__.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/copy_view/index/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/test_resolution.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/test_resolution.py deleted file mode 100644 index 7b2268f16a85fe784da75b3bc1f46b741d1b60c2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/test_resolution.py +++ /dev/null @@ -1,24 +0,0 @@ -import numpy as np -import pytz - -from pandas._libs.tslibs import ( - Resolution, - get_resolution, -) -from pandas._libs.tslibs.dtypes import NpyDatetimeUnit - - -def test_get_resolution_nano(): - # don't return the fallback RESO_DAY - arr = np.array([1], dtype=np.int64) - res = get_resolution(arr) - assert res == Resolution.RESO_NS - - -def test_get_resolution_non_nano_data(): - arr = np.array([1], dtype=np.int64) - res = get_resolution(arr, None, NpyDatetimeUnit.NPY_FR_us.value) - assert res == Resolution.RESO_US - - res = get_resolution(arr, pytz.UTC, NpyDatetimeUnit.NPY_FR_us.value) - assert res == Resolution.RESO_US diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/packaging/specifiers.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/packaging/specifiers.py deleted file mode 100644 index 0e218a6f9f75ea2060a8b08d1f1a043fdad68df8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,802 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import abc -import functools -import itertools -import re -import warnings -from typing import ( - Callable, - Dict, - Iterable, - Iterator, - List, - Optional, - Pattern, - Set, - Tuple, - TypeVar, - Union, -) - -from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse - -ParsedVersion = Union[Version, LegacyVersion] -UnparsedVersion = Union[Version, LegacyVersion, str] -VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) -CallableOperator = Callable[[ParsedVersion, str], bool] - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(metaclass=abc.ABCMeta): - @abc.abstractmethod - def __str__(self) -> str: - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self) -> int: - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. - """ - - @abc.abstractproperty - def prereleases(self) -> Optional[bool]: - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value: bool) -> None: - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. 
- """ - - @abc.abstractmethod - def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. - """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators: Dict[str, str] = {} - _regex: Pattern[str] - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier(f"Invalid specifier: '{spec}'") - - self._spec: Tuple[str, str] = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return f"<{self.__class__.__name__}({str(self)!r}{pre})>" - - def __str__(self) -> str: - return "{}{}".format(*self._spec) - - @property - def _canonical_spec(self) -> Tuple[str, str]: - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self) -> int: - return hash(self._canonical_spec) - - def __eq__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def _get_operator(self, op: str) -> CallableOperator: - operator_callable: CallableOperator = getattr( - self, f"_compare_{self._operators[op]}" - ) - return operator_callable - - def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self) -> str: - return self._spec[0] - - @property - def version(self) -> str: - return self._spec[1] - - @property - def prereleases(self) -> Optional[bool]: - return self._prereleases - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: str) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - normalized_item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if normalized_item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. 
- operator_callable: CallableOperator = self._get_operator(self.operator) - return operator_callable(normalized_item, self.version) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later in case nothing - # else matches this specifier. - if parsed_version.is_prerelease and not ( - prereleases or self.prereleases - ): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the beginning. - else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = r""" - (?P<operator>(==|!=|<=|>=|<|>)) - \s* - (?P<version> - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - super().__init__(spec, prereleases) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal( - self, prospective: LegacyVersion, spec: str - ) -> bool: - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective > self._coerce_version(spec) - - -def _require_version_compare( - fn: Callable[["Specifier", ParsedVersion, str], bool] -) -> Callable[["Specifier", ParsedVersion, str], bool]: - @functools.wraps(fn) - def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: - if not isinstance(prospective, Version): - return False - return
fn(self, prospective, spec) - - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = r""" - (?P<operator>(~=|==|!=|<=|>=|<|>|===)) - (?P<version> - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - | - (?: - # All other operators only allow a sub set of what the - # (non)equality operators do. Specifically they do not allow - # local versions to be specified nor do they allow the prefix - # matching wild cards. - (?<!==|!=|~=) # We have special cases for these - # operators so we want to make sure they - # don't match here. - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "~=": "compatible", - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - "===": "arbitrary", - } - - @_require_version_compare - def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: - - # Compatible releases have an equivalent combination of >= and ==. That - # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to - # implement this in terms of the other specifiers instead of - # implementing it ourselves. The only thing we need to do is construct - # the other specifiers. - - # We want everything but the last item in the version, but we want to - # ignore suffix segments. - prefix = ".".join( - list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] - ) - - # Add the prefix notation to the end of our string - prefix += ".*" - - return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( - prospective, prefix - ) - - @_require_version_compare - def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: - - # We need special logic to handle prefix matching - if spec.endswith(".*"): - # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment.
- split_spec = _version_split(spec[:-2]) # Remove the trailing .* - - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. - split_prospective = _version_split(str(prospective)) - - # Shorten the prospective version to be the same length as the spec - # so that we can determine if the specifier is a prefix of the - # prospective version or not. - shortened_prospective = split_prospective[: len(split_spec)] - - # Pad out our two sides with zeros so that they both equal the same - # length. - padded_spec, padded_prospective = _pad_version( - split_spec, shortened_prospective - ) - - return padded_prospective == padded_spec - else: - # Convert our spec string into a Version - spec_version = Version(spec) - - # If the specifier does not have a local segment, then we want to - # act as if the prospective version also does not have a local - # segment. - if not spec_version.local: - prospective = Version(prospective.public) - - return prospective == spec_version - - @_require_version_compare - def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: - return not self._compare_equal(prospective, spec) - - @_require_version_compare - def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: - - # NB: Local version identifiers are NOT permitted in the version - # specifier, so local version labels can be universally removed from - # the prospective version. - return Version(prospective.public) <= Version(spec) - - @_require_version_compare - def _compare_greater_than_equal( - self, prospective: ParsedVersion, spec: str - ) -> bool: - - # NB: Local version identifiers are NOT permitted in the version - # specifier, so local version labels can be universally removed from - # the prospective version. - return Version(prospective.public) >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. <3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. 
- if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is technically greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self) -> bool: - - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version: str) -> List[str]: - result: List[str] = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _is_not_suffix(segment: str) -> bool: - return not any( - segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") - ) - - -def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]) :]) - right_split.append(right[len(right_split[0]) :]) - - # Insert our padding - left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) - right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - - return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) - - -class SpecifierSet(BaseSpecifier): - def __init__( - self, specifiers: str = "", prereleases: Optional[bool] = None - ) -> None: - - # Split on , to break each individual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. 
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed: Set[_IndividualSpecifier] = set() - for specifier in split_specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return f"<SpecifierSet({str(self)!r}{pre})>" - - def __str__(self) -> str: - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self) -> int: - return hash(self._specs) - - def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": - if isinstance(other, str): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." - ) - - return specifier - - def __eq__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __len__(self) -> int: - return len(self._specs) - - def __iter__(self) -> Iterator[_IndividualSpecifier]: - return iter(self._specs) - - @property - def prereleases(self) -> Optional[bool]: - - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: UnparsedVersion) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them.
If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all(s.contains(item, prereleases=prereleases) for s in self._specs) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. - if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. - else: - filtered: List[VersionTypeVar] = [] - found_prereleases: List[VersionTypeVar] = [] - - item: UnparsedVersion - parsed_version: Union[Version, LegacyVersion] - - for item in iterable: - # Ensure that we some kind of Version class for this item. 
- if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/v1/validators.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/v1/validators.py deleted file mode 100644 index 549a235e0c3dec621afb4eb0d872dda986e6bae1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/v1/validators.py +++ /dev/null @@ -1,765 +0,0 @@ -import math -import re -from collections import OrderedDict, deque -from collections.abc import Hashable as CollectionsHashable -from datetime import date, datetime, time, timedelta -from decimal import Decimal, DecimalException -from enum import Enum, IntEnum -from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network -from pathlib import Path -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Deque, - Dict, - ForwardRef, - FrozenSet, - Generator, - Hashable, - List, - NamedTuple, - Pattern, - Set, - Tuple, - Type, - TypeVar, - Union, -) -from uuid import UUID - -from . import errors -from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time -from .typing import ( - AnyCallable, - all_literal_values, - display_as_type, - get_class, - is_callable_type, - is_literal_type, - is_namedtuple, - is_none_type, - is_typeddict, -) -from .utils import almost_equal_floats, lenient_issubclass, sequence_like - -if TYPE_CHECKING: - from typing_extensions import Literal, TypedDict - - from .config import BaseConfig - from .fields import ModelField - from .types import ConstrainedDecimal, ConstrainedFloat, ConstrainedInt - - ConstrainedNumber = Union[ConstrainedDecimal, ConstrainedFloat, ConstrainedInt] - AnyOrderedDict = OrderedDict[Any, Any] - Number = Union[int, float, Decimal] - StrBytes = Union[str, bytes] - - -def str_validator(v: Any) -> Union[str]: - if isinstance(v, str): - if isinstance(v, Enum): - return v.value - else: - return v - elif isinstance(v, (float, int, Decimal)): - # is there anything else we want to add here? If you think so, create an issue. 
- return str(v) - elif isinstance(v, (bytes, bytearray)): - return v.decode() - else: - raise errors.StrError() - - -def strict_str_validator(v: Any) -> Union[str]: - if isinstance(v, str) and not isinstance(v, Enum): - return v - raise errors.StrError() - - -def bytes_validator(v: Any) -> Union[bytes]: - if isinstance(v, bytes): - return v - elif isinstance(v, bytearray): - return bytes(v) - elif isinstance(v, str): - return v.encode() - elif isinstance(v, (float, int, Decimal)): - return str(v).encode() - else: - raise errors.BytesError() - - -def strict_bytes_validator(v: Any) -> Union[bytes]: - if isinstance(v, bytes): - return v - elif isinstance(v, bytearray): - return bytes(v) - else: - raise errors.BytesError() - - -BOOL_FALSE = {0, '0', 'off', 'f', 'false', 'n', 'no'} -BOOL_TRUE = {1, '1', 'on', 't', 'true', 'y', 'yes'} - - -def bool_validator(v: Any) -> bool: - if v is True or v is False: - return v - if isinstance(v, bytes): - v = v.decode() - if isinstance(v, str): - v = v.lower() - try: - if v in BOOL_TRUE: - return True - if v in BOOL_FALSE: - return False - except TypeError: - raise errors.BoolError() - raise errors.BoolError() - - -# matches the default limit cpython, see https://github.com/python/cpython/pull/96500 -max_str_int = 4_300 - - -def int_validator(v: Any) -> int: - if isinstance(v, int) and not (v is True or v is False): - return v - - # see https://github.com/pydantic/pydantic/issues/1477 and in turn, https://github.com/python/cpython/issues/95778 - # this check should be unnecessary once patch releases are out for 3.7, 3.8, 3.9 and 3.10 - # but better to check here until then. - # NOTICE: this does not fully protect user from the DOS risk since the standard library JSON implementation - # (and other std lib modules like xml) use `int()` and are likely called before this, the best workaround is to - # 1. update to the latest patch release of python once released, 2. 
use a different JSON library like ujson - if isinstance(v, (str, bytes, bytearray)) and len(v) > max_str_int: - raise errors.IntegerError() - - try: - return int(v) - except (TypeError, ValueError, OverflowError): - raise errors.IntegerError() - - -def strict_int_validator(v: Any) -> int: - if isinstance(v, int) and not (v is True or v is False): - return v - raise errors.IntegerError() - - -def float_validator(v: Any) -> float: - if isinstance(v, float): - return v - - try: - return float(v) - except (TypeError, ValueError): - raise errors.FloatError() - - -def strict_float_validator(v: Any) -> float: - if isinstance(v, float): - return v - raise errors.FloatError() - - -def float_finite_validator(v: 'Number', field: 'ModelField', config: 'BaseConfig') -> 'Number': - allow_inf_nan = getattr(field.type_, 'allow_inf_nan', None) - if allow_inf_nan is None: - allow_inf_nan = config.allow_inf_nan - - if allow_inf_nan is False and (math.isnan(v) or math.isinf(v)): - raise errors.NumberNotFiniteError() - return v - - -def number_multiple_validator(v: 'Number', field: 'ModelField') -> 'Number': - field_type: ConstrainedNumber = field.type_ - if field_type.multiple_of is not None: - mod = float(v) / float(field_type.multiple_of) % 1 - if not almost_equal_floats(mod, 0.0) and not almost_equal_floats(mod, 1.0): - raise errors.NumberNotMultipleError(multiple_of=field_type.multiple_of) - return v - - -def number_size_validator(v: 'Number', field: 'ModelField') -> 'Number': - field_type: ConstrainedNumber = field.type_ - if field_type.gt is not None and not v > field_type.gt: - raise errors.NumberNotGtError(limit_value=field_type.gt) - elif field_type.ge is not None and not v >= field_type.ge: - raise errors.NumberNotGeError(limit_value=field_type.ge) - - if field_type.lt is not None and not v < field_type.lt: - raise errors.NumberNotLtError(limit_value=field_type.lt) - if field_type.le is not None and not v <= field_type.le: - raise errors.NumberNotLeError(limit_value=field_type.le) - - return v - - -def constant_validator(v: 'Any', field: 'ModelField') -> 'Any': - """Validate ``const`` fields. - - The value provided for a ``const`` field must be equal to the default value - of the field. This is to support the keyword of the same name in JSON - Schema. 
- """ - if v != field.default: - raise errors.WrongConstantError(given=v, permitted=[field.default]) - - return v - - -def anystr_length_validator(v: 'StrBytes', config: 'BaseConfig') -> 'StrBytes': - v_len = len(v) - - min_length = config.min_anystr_length - if v_len < min_length: - raise errors.AnyStrMinLengthError(limit_value=min_length) - - max_length = config.max_anystr_length - if max_length is not None and v_len > max_length: - raise errors.AnyStrMaxLengthError(limit_value=max_length) - - return v - - -def anystr_strip_whitespace(v: 'StrBytes') -> 'StrBytes': - return v.strip() - - -def anystr_upper(v: 'StrBytes') -> 'StrBytes': - return v.upper() - - -def anystr_lower(v: 'StrBytes') -> 'StrBytes': - return v.lower() - - -def ordered_dict_validator(v: Any) -> 'AnyOrderedDict': - if isinstance(v, OrderedDict): - return v - - try: - return OrderedDict(v) - except (TypeError, ValueError): - raise errors.DictError() - - -def dict_validator(v: Any) -> Dict[Any, Any]: - if isinstance(v, dict): - return v - - try: - return dict(v) - except (TypeError, ValueError): - raise errors.DictError() - - -def list_validator(v: Any) -> List[Any]: - if isinstance(v, list): - return v - elif sequence_like(v): - return list(v) - else: - raise errors.ListError() - - -def tuple_validator(v: Any) -> Tuple[Any, ...]: - if isinstance(v, tuple): - return v - elif sequence_like(v): - return tuple(v) - else: - raise errors.TupleError() - - -def set_validator(v: Any) -> Set[Any]: - if isinstance(v, set): - return v - elif sequence_like(v): - return set(v) - else: - raise errors.SetError() - - -def frozenset_validator(v: Any) -> FrozenSet[Any]: - if isinstance(v, frozenset): - return v - elif sequence_like(v): - return frozenset(v) - else: - raise errors.FrozenSetError() - - -def deque_validator(v: Any) -> Deque[Any]: - if isinstance(v, deque): - return v - elif sequence_like(v): - return deque(v) - else: - raise errors.DequeError() - - -def enum_member_validator(v: Any, field: 'ModelField', config: 'BaseConfig') -> Enum: - try: - enum_v = field.type_(v) - except ValueError: - # field.type_ should be an enum, so will be iterable - raise errors.EnumMemberError(enum_values=list(field.type_)) - return enum_v.value if config.use_enum_values else enum_v - - -def uuid_validator(v: Any, field: 'ModelField') -> UUID: - try: - if isinstance(v, str): - v = UUID(v) - elif isinstance(v, (bytes, bytearray)): - try: - v = UUID(v.decode()) - except ValueError: - # 16 bytes in big-endian order as the bytes argument fail - # the above check - v = UUID(bytes=v) - except ValueError: - raise errors.UUIDError() - - if not isinstance(v, UUID): - raise errors.UUIDError() - - required_version = getattr(field.type_, '_required_version', None) - if required_version and v.version != required_version: - raise errors.UUIDVersionError(required_version=required_version) - - return v - - -def decimal_validator(v: Any) -> Decimal: - if isinstance(v, Decimal): - return v - elif isinstance(v, (bytes, bytearray)): - v = v.decode() - - v = str(v).strip() - - try: - v = Decimal(v) - except DecimalException: - raise errors.DecimalError() - - if not v.is_finite(): - raise errors.DecimalIsNotFiniteError() - - return v - - -def hashable_validator(v: Any) -> Hashable: - if isinstance(v, Hashable): - return v - - raise errors.HashableError() - - -def ip_v4_address_validator(v: Any) -> IPv4Address: - if isinstance(v, IPv4Address): - return v - - try: - return IPv4Address(v) - except ValueError: - raise errors.IPv4AddressError() - - -def 
ip_v6_address_validator(v: Any) -> IPv6Address: - if isinstance(v, IPv6Address): - return v - - try: - return IPv6Address(v) - except ValueError: - raise errors.IPv6AddressError() - - -def ip_v4_network_validator(v: Any) -> IPv4Network: - """ - Assume IPv4Network initialised with a default ``strict`` argument - - See more: - https://docs.python.org/library/ipaddress.html#ipaddress.IPv4Network - """ - if isinstance(v, IPv4Network): - return v - - try: - return IPv4Network(v) - except ValueError: - raise errors.IPv4NetworkError() - - -def ip_v6_network_validator(v: Any) -> IPv6Network: - """ - Assume IPv6Network initialised with a default ``strict`` argument - - See more: - https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network - """ - if isinstance(v, IPv6Network): - return v - - try: - return IPv6Network(v) - except ValueError: - raise errors.IPv6NetworkError() - - -def ip_v4_interface_validator(v: Any) -> IPv4Interface: - if isinstance(v, IPv4Interface): - return v - - try: - return IPv4Interface(v) - except ValueError: - raise errors.IPv4InterfaceError() - - -def ip_v6_interface_validator(v: Any) -> IPv6Interface: - if isinstance(v, IPv6Interface): - return v - - try: - return IPv6Interface(v) - except ValueError: - raise errors.IPv6InterfaceError() - - -def path_validator(v: Any) -> Path: - if isinstance(v, Path): - return v - - try: - return Path(v) - except TypeError: - raise errors.PathError() - - -def path_exists_validator(v: Any) -> Path: - if not v.exists(): - raise errors.PathNotExistsError(path=v) - - return v - - -def callable_validator(v: Any) -> AnyCallable: - """ - Perform a simple check if the value is callable. - - Note: complete matching of argument type hints and return types is not performed - """ - if callable(v): - return v - - raise errors.CallableError(value=v) - - -def enum_validator(v: Any) -> Enum: - if isinstance(v, Enum): - return v - - raise errors.EnumError(value=v) - - -def int_enum_validator(v: Any) -> IntEnum: - if isinstance(v, IntEnum): - return v - - raise errors.IntEnumError(value=v) - - -def make_literal_validator(type_: Any) -> Callable[[Any], Any]: - permitted_choices = all_literal_values(type_) - - # To have a O(1) complexity and still return one of the values set inside the `Literal`, - # we create a dict with the set values (a set causes some problems with the way intersection works). 
- # In some cases the set value and checked value can indeed be different (see `test_literal_validator_str_enum`) - allowed_choices = {v: v for v in permitted_choices} - - def literal_validator(v: Any) -> Any: - try: - return allowed_choices[v] - except (KeyError, TypeError): - raise errors.WrongConstantError(given=v, permitted=permitted_choices) - - return literal_validator - - -def constr_length_validator(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': - v_len = len(v) - - min_length = field.type_.min_length if field.type_.min_length is not None else config.min_anystr_length - if v_len < min_length: - raise errors.AnyStrMinLengthError(limit_value=min_length) - - max_length = field.type_.max_length if field.type_.max_length is not None else config.max_anystr_length - if max_length is not None and v_len > max_length: - raise errors.AnyStrMaxLengthError(limit_value=max_length) - - return v - - -def constr_strip_whitespace(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': - strip_whitespace = field.type_.strip_whitespace or config.anystr_strip_whitespace - if strip_whitespace: - v = v.strip() - - return v - - -def constr_upper(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': - upper = field.type_.to_upper or config.anystr_upper - if upper: - v = v.upper() - - return v - - -def constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': - lower = field.type_.to_lower or config.anystr_lower - if lower: - v = v.lower() - return v - - -def validate_json(v: Any, config: 'BaseConfig') -> Any: - if v is None: - # pass None through to other validators - return v - try: - return config.json_loads(v) # type: ignore - except ValueError: - raise errors.JsonError() - except TypeError: - raise errors.JsonTypeError() - - -T = TypeVar('T') - - -def make_arbitrary_type_validator(type_: Type[T]) -> Callable[[T], T]: - def arbitrary_type_validator(v: Any) -> T: - if isinstance(v, type_): - return v - raise errors.ArbitraryTypeError(expected_arbitrary_type=type_) - - return arbitrary_type_validator - - -def make_class_validator(type_: Type[T]) -> Callable[[Any], Type[T]]: - def class_validator(v: Any) -> Type[T]: - if lenient_issubclass(v, type_): - return v - raise errors.SubclassError(expected_class=type_) - - return class_validator - - -def any_class_validator(v: Any) -> Type[T]: - if isinstance(v, type): - return v - raise errors.ClassError() - - -def none_validator(v: Any) -> 'Literal[None]': - if v is None: - return v - raise errors.NotNoneError() - - -def pattern_validator(v: Any) -> Pattern[str]: - if isinstance(v, Pattern): - return v - - str_value = str_validator(v) - - try: - return re.compile(str_value) - except re.error: - raise errors.PatternError() - - -NamedTupleT = TypeVar('NamedTupleT', bound=NamedTuple) - - -def make_namedtuple_validator( - namedtuple_cls: Type[NamedTupleT], config: Type['BaseConfig'] -) -> Callable[[Tuple[Any, ...]], NamedTupleT]: - from .annotated_types import create_model_from_namedtuple - - NamedTupleModel = create_model_from_namedtuple( - namedtuple_cls, - __config__=config, - __module__=namedtuple_cls.__module__, - ) - namedtuple_cls.__pydantic_model__ = NamedTupleModel # type: ignore[attr-defined] - - def namedtuple_validator(values: Tuple[Any, ...]) -> NamedTupleT: - annotations = NamedTupleModel.__annotations__ - - if len(values) > len(annotations): - raise errors.ListMaxLengthError(limit_value=len(annotations)) - - dict_values: Dict[str, Any] = dict(zip(annotations, 
values)) - validated_dict_values: Dict[str, Any] = dict(NamedTupleModel(**dict_values)) - return namedtuple_cls(**validated_dict_values) - - return namedtuple_validator - - -def make_typeddict_validator( - typeddict_cls: Type['TypedDict'], config: Type['BaseConfig'] # type: ignore[valid-type] -) -> Callable[[Any], Dict[str, Any]]: - from .annotated_types import create_model_from_typeddict - - TypedDictModel = create_model_from_typeddict( - typeddict_cls, - __config__=config, - __module__=typeddict_cls.__module__, - ) - typeddict_cls.__pydantic_model__ = TypedDictModel # type: ignore[attr-defined] - - def typeddict_validator(values: 'TypedDict') -> Dict[str, Any]: # type: ignore[valid-type] - return TypedDictModel.parse_obj(values).dict(exclude_unset=True) - - return typeddict_validator - - -class IfConfig: - def __init__(self, validator: AnyCallable, *config_attr_names: str, ignored_value: Any = False) -> None: - self.validator = validator - self.config_attr_names = config_attr_names - self.ignored_value = ignored_value - - def check(self, config: Type['BaseConfig']) -> bool: - return any(getattr(config, name) not in {None, self.ignored_value} for name in self.config_attr_names) - - -# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same, -# IPv4Interface before IPv4Address, etc -_VALIDATORS: List[Tuple[Type[Any], List[Any]]] = [ - (IntEnum, [int_validator, enum_member_validator]), - (Enum, [enum_member_validator]), - ( - str, - [ - str_validator, - IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'), - IfConfig(anystr_upper, 'anystr_upper'), - IfConfig(anystr_lower, 'anystr_lower'), - IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'), - ], - ), - ( - bytes, - [ - bytes_validator, - IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'), - IfConfig(anystr_upper, 'anystr_upper'), - IfConfig(anystr_lower, 'anystr_lower'), - IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'), - ], - ), - (bool, [bool_validator]), - (int, [int_validator]), - (float, [float_validator, IfConfig(float_finite_validator, 'allow_inf_nan', ignored_value=True)]), - (Path, [path_validator]), - (datetime, [parse_datetime]), - (date, [parse_date]), - (time, [parse_time]), - (timedelta, [parse_duration]), - (OrderedDict, [ordered_dict_validator]), - (dict, [dict_validator]), - (list, [list_validator]), - (tuple, [tuple_validator]), - (set, [set_validator]), - (frozenset, [frozenset_validator]), - (deque, [deque_validator]), - (UUID, [uuid_validator]), - (Decimal, [decimal_validator]), - (IPv4Interface, [ip_v4_interface_validator]), - (IPv6Interface, [ip_v6_interface_validator]), - (IPv4Address, [ip_v4_address_validator]), - (IPv6Address, [ip_v6_address_validator]), - (IPv4Network, [ip_v4_network_validator]), - (IPv6Network, [ip_v6_network_validator]), -] - - -def find_validators( # noqa: C901 (ignore complexity) - type_: Type[Any], config: Type['BaseConfig'] -) -> Generator[AnyCallable, None, None]: - from .dataclasses import is_builtin_dataclass, make_dataclass_validator - - if type_ is Any or type_ is object: - return - type_type = type_.__class__ - if type_type == ForwardRef or type_type == TypeVar: - return - - if is_none_type(type_): - yield none_validator - return - if type_ is Pattern or type_ is re.Pattern: - yield pattern_validator - return - if type_ is Hashable or type_ is CollectionsHashable: - yield hashable_validator - return - if is_callable_type(type_): - yield callable_validator - 
return - if is_literal_type(type_): - yield make_literal_validator(type_) - return - if is_builtin_dataclass(type_): - yield from make_dataclass_validator(type_, config) - return - if type_ is Enum: - yield enum_validator - return - if type_ is IntEnum: - yield int_enum_validator - return - if is_namedtuple(type_): - yield tuple_validator - yield make_namedtuple_validator(type_, config) - return - if is_typeddict(type_): - yield make_typeddict_validator(type_, config) - return - - class_ = get_class(type_) - if class_ is not None: - if class_ is not Any and isinstance(class_, type): - yield make_class_validator(class_) - else: - yield any_class_validator - return - - for val_type, validators in _VALIDATORS: - try: - if issubclass(type_, val_type): - for v in validators: - if isinstance(v, IfConfig): - if v.check(config): - yield v.validator - else: - yield v - return - except TypeError: - raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') - - if config.arbitrary_types_allowed: - yield make_arbitrary_type_validator(type_) - else: - raise RuntimeError(f'no validator found for {type_}, see `arbitrary_types_allowed` in Config') diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/apdlexer.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/apdlexer.py deleted file mode 100644 index a50219c3b24731d0045e4588f42e6de51069feda..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/apdlexer.py +++ /dev/null @@ -1,592 +0,0 @@ -""" - pygments.lexers.apdlexer - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for ANSYS Parametric Design Language. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, words, default -from pygments.token import Comment, Keyword, Name, Number, Operator, \ - String, Generic, Punctuation, Whitespace, Escape - -__all__ = ['apdlexer'] - - -class apdlexer(RegexLexer): - """ - For APDL source code. - - .. 
versionadded:: 2.9 - """ - name = 'ANSYS parametric design language' - aliases = ['ansys', 'apdl'] - filenames = ['*.ans'] - flags = re.IGNORECASE - - # list of elements - elafunb = ("SURF152", "SURF153", "SURF154", "SURF156", "SHELL157", - "SURF159", "LINK160", "BEAM161", "PLANE162", - "SHELL163", "SOLID164", "COMBI165", "MASS166", - "LINK167", "SOLID168", "TARGE169", "TARGE170", - "CONTA171", "CONTA172", "CONTA173", "CONTA174", - "CONTA175", "CONTA176", "CONTA177", "CONTA178", - "PRETS179", "LINK180", "SHELL181", "PLANE182", - "PLANE183", "MPC184", "SOLID185", "SOLID186", - "SOLID187", "BEAM188", "BEAM189", "SOLSH190", - "INTER192", "INTER193", "INTER194", "INTER195", - "MESH200", "FOLLW201", "INTER202", "INTER203", - "INTER204", "INTER205", "SHELL208", "SHELL209", - "CPT212", "CPT213", "COMBI214", "CPT215", "CPT216", - "CPT217", "FLUID220", "FLUID221", "PLANE223", - "SOLID226", "SOLID227", "PLANE230", "SOLID231", - "SOLID232", "PLANE233", "SOLID236", "SOLID237", - "PLANE238", "SOLID239", "SOLID240", "HSFLD241", - "HSFLD242", "SURF251", "SURF252", "REINF263", - "REINF264", "REINF265", "SOLID272", "SOLID273", - "SOLID278", "SOLID279", "SHELL281", "SOLID285", - "PIPE288", "PIPE289", "ELBOW290", "USER300", "BEAM3", - "BEAM4", "BEAM23", "BEAM24", "BEAM44", "BEAM54", - "COMBIN7", "FLUID79", "FLUID80", "FLUID81", "FLUID141", - "FLUID142", "INFIN9", "INFIN47", "PLANE13", "PLANE25", - "PLANE42", "PLANE53", "PLANE67", "PLANE82", "PLANE83", - "PLANE145", "PLANE146", "CONTAC12", "CONTAC52", - "LINK1", "LINK8", "LINK10", "LINK32", "PIPE16", - "PIPE17", "PIPE18", "PIPE20", "PIPE59", "PIPE60", - "SHELL41", "SHELL43", "SHELL57", "SHELL63", "SHELL91", - "SHELL93", "SHELL99", "SHELL150", "SOLID5", "SOLID45", - "SOLID46", "SOLID65", "SOLID69", "SOLID92", "SOLID95", - "SOLID117", "SOLID127", "SOLID128", "SOLID147", - "SOLID148", "SOLID191", "VISCO88", "VISCO89", - "VISCO106", "VISCO107", "VISCO108", "TRANS109") - - elafunc = ("PGRAPH", "/VT", "VTIN", "VTRFIL", "VTTEMP", "PGRSET", - "VTCLR", "VTMETH", "VTRSLT", "VTVMOD", "PGSELE", - "VTDISC", "VTMP", "VTSEC", "PGWRITE", "VTEVAL", "VTOP", - "VTSFE", "POUTRES", "VTFREQ", "VTPOST", "VTSL", - "FLDATA1-40", "HFPCSWP", "MSDATA", "MSVARY", "QFACT", - "FLOCHECK", "HFPOWER", "MSMASS", "PERI", "SPADP", - "FLREAD", "HFPORT", "MSMETH", "PLFSS", "SPARM", - "FLOTRAN", "HFSCAT", "MSMIR", "PLSCH", "SPFSS", - "HFADP", "ICE", "MSNOMF", "PLSYZ", "SPICE", "HFARRAY", - "ICEDELE", "MSPROP", "PLTD", "SPSCAN", "HFDEEM", - "ICELIST", "MSQUAD", "PLTLINE", "SPSWP", "HFEIGOPT", - "ICVFRC", "MSRELAX", "PLVFRC", "HFEREFINE", "LPRT", - "MSSOLU", "/PICE", "HFMODPRT", "MSADV", "MSSPEC", - "PLWAVE", "HFPA", "MSCAP", "MSTERM", "PRSYZ") - - elafund = ("*VOPER", "VOVLAP", "*VPLOT", "VPLOT", "VPTN", "*VPUT", - "VPUT", "*VREAD", "VROTAT", "VSBA", "VSBV", "VSBW", - "/VSCALE", "*VSCFUN", "VSEL", "VSLA", "*VSTAT", "VSUM", - "VSWEEP", "VSYMM", "VTRAN", "VTYPE", "/VUP", "*VWRITE", - "/WAIT", "WAVES", "WERASE", "WFRONT", "/WINDOW", - "WMID", "WMORE", "WPAVE", "WPCSYS", "WPLANE", "WPOFFS", - "WPROTA", "WPSTYL", "WRFULL", "WRITE", "WRITEMAP", - "*WRK", "WSORT", "WSPRINGS", "WSTART", "WTBCREATE", - "XFDATA", "XFENRICH", "XFLIST", "/XFRM", "/XRANGE", - "XVAR", "/YRANGE", "/ZOOM", "/WB", "XMLO", "/XML", - "CNTR", "EBLOCK", "CMBLOCK", "NBLOCK", "/TRACK", - "CWZPLOT", "~EUI", "NELE", "EALL", "NALL", "FLITEM", - "LSLN", "PSOLVE", "ASLN", "/VERIFY", "/SSS", "~CFIN", - "*EVAL", "*MOONEY", "/RUNSTAT", "ALPFILL", - "ARCOLLAPSE", "ARDETACH", "ARFILL", "ARMERGE", - "ARSPLIT", "FIPLOT", "GAPFINISH", "GAPLIST", - 
"GAPMERGE", "GAPOPT", "GAPPLOT", "LNCOLLAPSE", - "LNDETACH", "LNFILL", "LNMERGE", "LNSPLIT", "PCONV", - "PLCONV", "PEMOPTS", "PEXCLUDE", "PINCLUDE", "PMETH", - "/PMETH", "PMOPTS", "PPLOT", "PPRANGE", "PRCONV", - "PRECISION", "RALL", "RFILSZ", "RITER", "RMEMRY", - "RSPEED", "RSTAT", "RTIMST", "/RUNST", "RWFRNT", - "SARPLOT", "SHSD", "SLPPLOT", "SLSPLOT", "VCVFILL", - "/OPT", "OPEQN", "OPFACT", "OPFRST", "OPGRAD", - "OPKEEP", "OPLOOP", "OPPRNT", "OPRAND", "OPSUBP", - "OPSWEEP", "OPTYPE", "OPUSER", "OPVAR", "OPADD", - "OPCLR", "OPDEL", "OPMAKE", "OPSEL", "OPANL", "OPDATA", - "OPRESU", "OPSAVE", "OPEXE", "OPLFA", "OPLGR", - "OPLIST", "OPLSW", "OPRFA", "OPRGR", "OPRSW", - "PILECALC", "PILEDISPSET", "PILEGEN", "PILELOAD", - "PILEMASS", "PILERUN", "PILESEL", "PILESTIF", - "PLVAROPT", "PRVAROPT", "TOCOMP", "TODEF", "TOFREQ", - "TOTYPE", "TOVAR", "TOEXE", "TOLOOP", "TOGRAPH", - "TOLIST", "TOPLOT", "TOPRINT", "TOSTAT", "TZAMESH", - "TZDELE", "TZEGEN", "XVAROPT", "PGSAVE", "SOLCONTROL", - "TOTAL", "VTGEOM", "VTREAL", "VTSTAT") - - elafune = ("/ANUM", "AOFFST", "AOVLAP", "APLOT", "APPEND", "APTN", - "ARCLEN", "ARCTRM", "AREAS", "AREFINE", "AREMESH", - "AREVERSE", "AROTAT", "ARSCALE", "ARSYM", "ASBA", - "ASBL", "ASBV", "ASBW", "ASCRES", "ASEL", "ASIFILE", - "*ASK", "ASKIN", "ASLL", "ASLV", "ASOL", "/ASSIGN", - "ASUB", "ASUM", "ATAN", "ATRAN", "ATYPE", "/AUTO", - "AUTOTS", "/AUX2", "/AUX3", "/AUX12", "/AUX15", - "AVPRIN", "AVRES", "AWAVE", "/AXLAB", "*AXPY", - "/BATCH", "BCSOPTION", "BETAD", "BF", "BFA", "BFADELE", - "BFALIST", "BFCUM", "BFDELE", "BFE", "BFECUM", - "BFEDELE", "BFELIST", "BFESCAL", "BFINT", "BFK", - "BFKDELE", "BFKLIST", "BFL", "BFLDELE", "BFLIST", - "BFLLIST", "BFSCALE", "BFTRAN", "BFUNIF", "BFV", - "BFVDELE", "BFVLIST", "BIOOPT", "BIOT", "BLC4", "BLC5", - "BLOCK", "BOOL", "BOPTN", "BSAX", "BSMD", "BSM1", - "BSM2", "BSPLIN", "BSS1", "BSS2", "BSTE", "BSTQ", - "BTOL", "BUCOPT", "C", "CALC", "CAMPBELL", "CBDOF", - "CBMD", "CBMX", "CBTE", "CBTMP", "CDOPT", "CDREAD", - "CDWRITE", "CE", "CECHECK", "CECMOD", "CECYC", - "CEDELE", "CEINTF", "CELIST", "CENTER", "CEQN", - "CERIG", "CESGEN", "CFACT", "*CFCLOS", "*CFOPEN", - "*CFWRITE", "/CFORMAT", "CGLOC", "CGOMGA", "CGROW", - "CHECK", "CHKMSH", "CINT", "CIRCLE", "CISOL", - "/CLABEL", "/CLEAR", "CLOCAL", "CLOG", "/CLOG", - "CLRMSHLN", "CM", "CMACEL", "/CMAP", "CMATRIX", - "CMDELE", "CMDOMEGA", "CMEDIT", "CMGRP", "CMLIST", - "CMMOD", "CMOMEGA", "CMPLOT", "CMROTATE", "CMSEL", - "CMSFILE", "CMSOPT", "CMWRITE", "CNCHECK", "CNKMOD", - "CNTR", "CNVTOL", "/COLOR", "*COMP", "COMBINE", - "COMPRESS", "CON4", "CONE", "/CONFIG", "CONJUG", - "/CONTOUR", "/COPY", "CORIOLIS", "COUPLE", "COVAL", - "CP", "CPCYC", "CPDELE", "CPINTF", "/CPLANE", "CPLGEN", - "CPLIST", "CPMERGE", "CPNGEN", "CPSGEN", "CQC", - "*CREATE", "CRPLIM", "CS", "CSCIR", "CSDELE", "CSKP", - "CSLIST", "CSWPLA", "CSYS", "/CTYPE", "CURR2D", - "CUTCONTROL", "/CVAL", "CVAR", "/CWD", "CYCCALC", - "/CYCEXPAND", "CYCFILES", "CYCFREQ", "*CYCLE", - "CYCLIC", "CYCOPT", "CYCPHASE", "CYCSPEC", "CYL4", - "CYL5", "CYLIND", "CZDEL", "CZMESH", "D", "DA", - "DADELE", "DALIST", "DAMORPH", "DATA", "DATADEF", - "DCGOMG", "DCUM", "DCVSWP", "DDASPEC", "DDELE", - "DDOPTION", "DEACT", "DEFINE", "*DEL", "DELETE", - "/DELETE", "DELTIM", "DELTIME", "DEMORPH", "DERIV", "DESIZE", - "DESOL", "DETAB", "/DEVDISP", "/DEVICE", "/DFLAB", - "DFLX", "DFSWAVE", "DIG", "DIGIT", "*DIM", - "/DIRECTORY", "DISPLAY", "/DIST", "DJ", "DJDELE", - "DJLIST", "DK", "DKDELE", "DKLIST", "DL", "DLDELE", - "DLIST", "DLLIST", "*DMAT", "DMOVE", 
"DMPEXT", - "DMPOPTION", "DMPRAT", "DMPSTR", "DNSOL", "*DO", "DOF", - "DOFSEL", "DOMEGA", "*DOT", "*DOWHILE", "DSCALE", - "/DSCALE", "DSET", "DSPOPTION", "DSUM", "DSURF", - "DSYM", "DSYS", "DTRAN", "DUMP", "/DV3D", "DVAL", - "DVMORPH", "DYNOPT", "E", "EALIVE", "EDADAPT", "EDALE", - "EDASMP", "EDBOUND", "EDBX", "EDBVIS", "EDCADAPT", - "EDCGEN", "EDCLIST", "EDCMORE", "EDCNSTR", "EDCONTACT", - "EDCPU", "EDCRB", "EDCSC", "EDCTS", "EDCURVE", - "EDDAMP", "EDDBL", "EDDC", "EDDRELAX", "EDDUMP", - "EDELE", "EDENERGY", "EDFPLOT", "EDGCALE", "/EDGE", - "EDHGLS", "EDHIST", "EDHTIME", "EDINT", "EDIPART", - "EDIS", "EDLCS", "EDLOAD", "EDMP", "EDNB", "EDNDTSD", - "EDNROT", "EDOPT", "EDOUT", "EDPART", "EDPC", "EDPL", - "EDPVEL", "EDRC", "EDRD", "EDREAD", "EDRI", "EDRST", - "EDRUN", "EDSHELL", "EDSOLV", "EDSP", "EDSTART", - "EDTERM", "EDTP", "EDVEL", "EDWELD", "EDWRITE", - "EEXTRUDE", "/EFACET", "EGEN", "*EIGEN", "EINFIN", - "EINTF", "EKILL", "ELBOW", "ELEM", "ELIST", "*ELSE", - "*ELSEIF", "EMAGERR", "EMATWRITE", "EMF", "EMFT", - "EMID", "EMIS", "EMODIF", "EMORE", "EMSYM", "EMTGEN", - "EMUNIT", "EN", "*END", "*ENDDO", "*ENDIF", - "ENDRELEASE", "ENERSOL", "ENGEN", "ENORM", "ENSYM", - "EORIENT", "EPLOT", "EQSLV", "ERASE", "/ERASE", - "EREAD", "EREFINE", "EREINF", "ERESX", "ERNORM", - "ERRANG", "ESCHECK", "ESEL", "/ESHAPE", "ESIZE", - "ESLA", "ESLL", "ESLN", "ESLV", "ESOL", "ESORT", - "ESSOLV", "ESTIF", "ESURF", "ESYM", "ESYS", "ET", - "ETABLE", "ETCHG", "ETCONTROL", "ETDELE", "ETLIST", - "ETYPE", "EUSORT", "EWRITE", "*EXIT", "/EXIT", "EXP", - "EXPAND", "/EXPAND", "EXPASS", "*EXPORT", "EXPROFILE", - "EXPSOL", "EXTOPT", "EXTREM", "EXUNIT", "F", "/FACET", - "FATIGUE", "FC", "FCCHECK", "FCDELE", "FCLIST", "FCUM", - "FCTYP", "FDELE", "/FDELE", "FE", "FEBODY", "FECONS", - "FEFOR", "FELIST", "FESURF", "*FFT", "FILE", - "FILEAUX2", "FILEAUX3", "FILEDISP", "FILL", "FILLDATA", - "/FILNAME", "FINISH", "FITEM", "FJ", "FJDELE", - "FJLIST", "FK", "FKDELE", "FKLIST", "FL", "FLIST", - "FLLIST", "FLST", "FLUXV", "FLUREAD", "FMAGBC", - "FMAGSUM", "/FOCUS", "FOR2D", "FORCE", "FORM", - "/FORMAT", "FP", "FPLIST", "*FREE", "FREQ", "FRQSCL", - "FS", "FSCALE", "FSDELE", "FSLIST", "FSNODE", "FSPLOT", - "FSSECT", "FSSPARM", "FSUM", "FTCALC", "FTRAN", - "FTSIZE", "FTWRITE", "FTYPE", "FVMESH", "GAP", "GAPF", - "GAUGE", "GCDEF", "GCGEN", "/GCMD", "/GCOLUMN", - "GENOPT", "GEOM", "GEOMETRY", "*GET", "/GFILE", - "/GFORMAT", "/GLINE", "/GMARKER", "GMATRIX", "GMFACE", - "*GO", "/GO", "/GOLIST", "/GOPR", "GP", "GPDELE", - "GPLIST", "GPLOT", "/GRAPHICS", "/GRESUME", "/GRID", - "/GROPT", "GRP", "/GRTYP", "/GSAVE", "GSBDATA", - "GSGDATA", "GSLIST", "GSSOL", "/GST", "GSUM", "/GTHK", - "/GTYPE", "HARFRQ", "/HBC", "HBMAT", "/HEADER", "HELP", - "HELPDISP", "HEMIOPT", "HFANG", "HFSYM", "HMAGSOLV", - "HPGL", "HPTCREATE", "HPTDELETE", "HRCPLX", "HREXP", - "HROPT", "HROCEAN", "HROUT", "IC", "ICDELE", "ICLIST", - "/ICLWID", "/ICSCALE", "*IF", "IGESIN", "IGESOUT", - "/IMAGE", "IMAGIN", "IMESH", "IMMED", "IMPD", - "INISTATE", "*INIT", "/INPUT", "/INQUIRE", "INRES", - "INRTIA", "INT1", "INTSRF", "IOPTN", "IRLF", "IRLIST", - "*ITENGINE", "JPEG", "JSOL", "K", "KATT", "KBC", - "KBETW", "KCALC", "KCENTER", "KCLEAR", "KDELE", - "KDIST", "KEEP", "KESIZE", "KEYOPT", "KEYPTS", "KEYW", - "KFILL", "KGEN", "KL", "KLIST", "KMESH", "KMODIF", - "KMOVE", "KNODE", "KPLOT", "KPSCALE", "KREFINE", - "KSCALE", "KSCON", "KSEL", "KSLL", "KSLN", "KSUM", - "KSYMM", "KTRAN", "KUSE", "KWPAVE", "KWPLAN", "L", - "L2ANG", "L2TAN", "LANG", "LARC", "/LARC", "LAREA", - "LARGE", "LATT", 
"LAYER", "LAYERP26", "LAYLIST", - "LAYPLOT", "LCABS", "LCASE", "LCCALC", "LCCAT", - "LCDEF", "LCFACT", "LCFILE", "LCLEAR", "LCOMB", - "LCOPER", "LCSEL", "LCSL", "LCSUM", "LCWRITE", - "LCZERO", "LDELE", "LDIV", "LDRAG", "LDREAD", "LESIZE", - "LEXTND", "LFILLT", "LFSURF", "LGEN", "LGLUE", - "LGWRITE", "/LIGHT", "LINA", "LINE", "/LINE", "LINES", - "LINL", "LINP", "LINV", "LIST", "*LIST", "LLIST", - "LMATRIX", "LMESH", "LNSRCH", "LOCAL", "LOVLAP", - "LPLOT", "LPTN", "LREFINE", "LREVERSE", "LROTAT", - "LSBA", "*LSBAC", "LSBL", "LSBV", "LSBW", "LSCLEAR", - "LSDELE", "*LSDUMP", "LSEL", "*LSENGINE", "*LSFACTOR", - "LSLA", "LSLK", "LSOPER", "/LSPEC", "LSREAD", - "*LSRESTORE", "LSSCALE", "LSSOLVE", "LSTR", "LSUM", - "LSWRITE", "/LSYMBOL", "LSYMM", "LTAN", "LTRAN", - "LUMPM", "LVSCALE", "LWPLAN", "M", "MADAPT", "MAGOPT", - "MAGSOLV", "/MAIL", "MAP", "/MAP", "MAP2DTO3D", - "MAPSOLVE", "MAPVAR", "MASTER", "MAT", "MATER", - "MCHECK", "MDAMP", "MDELE", "MDPLOT", "MEMM", "/MENU", - "MESHING", "MFANALYSIS", "MFBUCKET", "MFCALC", "MFCI", - "MFCLEAR", "MFCMMAND", "MFCONV", "MFDTIME", "MFELEM", - "MFEM", "MFEXTER", "MFFNAME", "MFFR", "MFIMPORT", - "MFINTER", "MFITER", "MFLCOMM", "MFLIST", "MFMAP", - "MFORDER", "MFOUTPUT", "*MFOURI", "MFPSIMUL", "MFRC", - "MFRELAX", "MFRSTART", "MFSORDER", "MFSURFACE", - "MFTIME", "MFTOL", "*MFUN", "MFVOLUME", "MFWRITE", - "MGEN", "MIDTOL", "/MKDIR", "MLIST", "MMASS", "MMF", - "MODCONT", "MODE", "MODIFY", "MODMSH", "MODSELOPTION", - "MODOPT", "MONITOR", "*MOPER", "MOPT", "MORPH", "MOVE", - "MP", "MPAMOD", "MPCHG", "MPCOPY", "MPDATA", "MPDELE", - "MPDRES", "/MPLIB", "MPLIST", "MPPLOT", "MPREAD", - "MPRINT", "MPTEMP", "MPTGEN", "MPTRES", "MPWRITE", - "/MREP", "MSAVE", "*MSG", "MSHAPE", "MSHCOPY", - "MSHKEY", "MSHMID", "MSHPATTERN", "MSOLVE", "/MSTART", - "MSTOLE", "*MULT", "*MWRITE", "MXPAND", "N", "NANG", - "NAXIS", "NCNV", "NDELE", "NDIST", "NDSURF", "NEQIT", - "/NERR", "NFORCE", "NGEN", "NKPT", "NLADAPTIVE", - "NLDIAG", "NLDPOST", "NLGEOM", "NLHIST", "NLIST", - "NLMESH", "NLOG", "NLOPT", "NMODIF", "NOCOLOR", - "NODES", "/NOERASE", "/NOLIST", "NOOFFSET", "NOORDER", - "/NOPR", "NORA", "NORL", "/NORMAL", "NPLOT", "NPRINT", - "NREAD", "NREFINE", "NRLSUM", "*NRM", "NROPT", - "NROTAT", "NRRANG", "NSCALE", "NSEL", "NSLA", "NSLE", - "NSLK", "NSLL", "NSLV", "NSMOOTH", "NSOL", "NSORT", - "NSTORE", "NSUBST", "NSVR", "NSYM", "/NUMBER", - "NUMCMP", "NUMEXP", "NUMMRG", "NUMOFF", "NUMSTR", - "NUMVAR", "NUSORT", "NWPAVE", "NWPLAN", "NWRITE", - "OCDATA", "OCDELETE", "OCLIST", "OCREAD", "OCTABLE", - "OCTYPE", "OCZONE", "OMEGA", "OPERATE", "OPNCONTROL", - "OUTAERO", "OUTOPT", "OUTPR", "/OUTPUT", "OUTRES", - "OVCHECK", "PADELE", "/PAGE", "PAGET", "PAPUT", - "PARESU", "PARTSEL", "PARRES", "PARSAV", "PASAVE", - "PATH", "PAUSE", "/PBC", "/PBF", "PCALC", "PCGOPT", - "PCIRC", "/PCIRCLE", "/PCOPY", "PCROSS", "PDANL", - "PDCDF", "PDCFLD", "PDCLR", "PDCMAT", "PDCORR", - "PDDMCS", "PDDOEL", "PDEF", "PDEXE", "PDHIST", - "PDINQR", "PDLHS", "PDMETH", "PDOT", "PDPINV", - "PDPLOT", "PDPROB", "PDRESU", "PDROPT", "/PDS", - "PDSAVE", "PDSCAT", "PDSENS", "PDSHIS", "PDUSER", - "PDVAR", "PDWRITE", "PERBC2D", "PERTURB", "PFACT", - "PHYSICS", "PIVCHECK", "PLCAMP", "PLCFREQ", "PLCHIST", - "PLCINT", "PLCPLX", "PLCRACK", "PLDISP", "PLESOL", - "PLETAB", "PLFAR", "PLF2D", "PLGEOM", "PLLS", "PLMAP", - "PLMC", "PLNEAR", "PLNSOL", "/PLOPTS", "PLORB", "PLOT", - "PLOTTING", "PLPAGM", "PLPATH", "PLSECT", "PLST", - "PLTIME", "PLTRAC", "PLVAR", "PLVECT", "PLZZ", - "/PMACRO", "PMAP", "PMGTRAN", "PMLOPT", "PMLSIZE", - "/PMORE", 
"PNGR", "/PNUM", "POINT", "POLY", "/POLYGON", - "/POST1", "/POST26", "POWERH", "PPATH", "PRANGE", - "PRAS", "PRCAMP", "PRCINT", "PRCPLX", "PRED", - "PRENERGY", "/PREP7", "PRERR", "PRESOL", "PRETAB", - "PRFAR", "PRI2", "PRIM", "PRINT", "*PRINT", "PRISM", - "PRITER", "PRJSOL", "PRNEAR", "PRNLD", "PRNSOL", - "PROD", "PRORB", "PRPATH", "PRRFOR", "PRRSOL", - "PRSCONTROL", "PRSECT", "PRTIME", "PRVAR", "PRVECT", - "PSCONTROL", "PSCR", "PSDCOM", "PSDFRQ", "PSDGRAPH", - "PSDRES", "PSDSPL", "PSDUNIT", "PSDVAL", "PSDWAV", - "/PSEARCH", "PSEL", "/PSF", "PSMAT", "PSMESH", - "/PSPEC", "/PSTATUS", "PSTRES", "/PSYMB", "PTR", - "PTXY", "PVECT", "/PWEDGE", "QDVAL", "QRDOPT", "QSOPT", - "QUAD", "/QUIT", "QUOT", "R", "RACE", "RADOPT", - "RAPPND", "RATE", "/RATIO", "RBE3", "RCON", "RCYC", - "RDEC", "RDELE", "READ", "REAL", "REALVAR", "RECTNG", - "REMESH", "/RENAME", "REORDER", "*REPEAT", "/REPLOT", - "RESCOMBINE", "RESCONTROL", "RESET", "/RESET", "RESP", - "RESUME", "RESVEC", "RESWRITE", "*RETURN", "REXPORT", - "REZONE", "RFORCE", "/RGB", "RIGID", "RIGRESP", - "RIMPORT", "RLIST", "RMALIST", "RMANL", "RMASTER", - "RMCAP", "RMCLIST", "/RMDIR", "RMFLVEC", "RMLVSCALE", - "RMMLIST", "RMMRANGE", "RMMSELECT", "RMNDISP", - "RMNEVEC", "RMODIF", "RMORE", "RMPORDER", "RMRESUME", - "RMRGENERATE", "RMROPTIONS", "RMRPLOT", "RMRSTATUS", - "RMSAVE", "RMSMPLE", "RMUSE", "RMXPORT", "ROCK", - "ROSE", "RPOLY", "RPR4", "RPRISM", "RPSD", "RSFIT", - "RSOPT", "RSPLIT", "RSPLOT", "RSPRNT", "RSSIMS", - "RSTMAC", "RSTOFF", "RSURF", "RSYMM", "RSYS", "RTHICK", - "SABS", "SADD", "SALLOW", "SAVE", "SBCLIST", "SBCTRAN", - "SDELETE", "SE", "SECCONTROL", "SECDATA", - "SECFUNCTION", "SECJOINT", "/SECLIB", "SECLOCK", - "SECMODIF", "SECNUM", "SECOFFSET", "SECPLOT", - "SECREAD", "SECSTOP", "SECTYPE", "SECWRITE", "SED", - "SEDLIST", "SEEXP", "/SEG", "SEGEN", "SELIST", "SELM", - "SELTOL", "SENERGY", "SEOPT", "SESYMM", "*SET", "SET", - "SETFGAP", "SETRAN", "SEXP", "SF", "SFA", "SFACT", - "SFADELE", "SFALIST", "SFBEAM", "SFCALC", "SFCUM", - "SFDELE", "SFE", "SFEDELE", "SFELIST", "SFFUN", - "SFGRAD", "SFL", "SFLDELE", "SFLEX", "SFLIST", - "SFLLIST", "SFSCALE", "SFTRAN", "/SHADE", "SHELL", - "/SHOW", "/SHOWDISP", "SHPP", "/SHRINK", "SLIST", - "SLOAD", "SMALL", "*SMAT", "SMAX", "/SMBC", "SMBODY", - "SMCONS", "SMFOR", "SMIN", "SMOOTH", "SMRTSIZE", - "SMSURF", "SMULT", "SNOPTION", "SOLU", "/SOLU", - "SOLUOPT", "SOLVE", "SORT", "SOURCE", "SPACE", - "SPCNOD", "SPCTEMP", "SPDAMP", "SPEC", "SPFREQ", - "SPGRAPH", "SPH4", "SPH5", "SPHERE", "SPLINE", "SPLOT", - "SPMWRITE", "SPOINT", "SPOPT", "SPREAD", "SPTOPT", - "SPOWER", "SPUNIT", "SPVAL", "SQRT", "*SREAD", "SRSS", - "SSBT", "/SSCALE", "SSLN", "SSMT", "SSPA", "SSPB", - "SSPD", "SSPE", "SSPM", "SSUM", "SSTATE", "STABILIZE", - "STAOPT", "STAT", "*STATUS", "/STATUS", "STEF", - "STORE", "SUBOPT", "SUBSET", "SUCALC", - "SUCR", "SUDEL", "SUEVAL", "SUGET", "SUMAP", "SUMTYPE", - "SUPL", "SUPR", "SURESU", "SUSAVE", "SUSEL", "SUVECT", - "SV", "SVPLOT", "SVTYP", "SWADD", "SWDEL", "SWGEN", - "SWLIST", "SYNCHRO", "/SYP", "/SYS", "TALLOW", - "TARGET", "*TAXIS", "TB", "TBCOPY", "TBDATA", "TBDELE", - "TBEO", "TBIN", "TBFIELD", "TBFT", "TBLE", "TBLIST", - "TBMODIF", "TBPLOT", "TBPT", "TBTEMP", "TCHG", "/TEE", - "TERM", "THEXPAND", "THOPT", "TIFF", "TIME", - "TIMERANGE", "TIMINT", "TIMP", "TINTP", - "/TLABEL", "TOFFST", "*TOPER", "TORQ2D", "TORQC2D", - "TORQSUM", "TORUS", "TRANS", "TRANSFER", "*TREAD", - "TREF", "/TRIAD", "/TRLCY", "TRNOPT", "TRPDEL", - "TRPLIS", "TRPOIN", "TRTIME", "TSHAP", "/TSPEC", - "TSRES", "TUNIF", "TVAR", 
"/TXTRE", "/TYPE", "TYPE", - "/UCMD", "/UDOC", "/UI", "UIMP", "/UIS", "*ULIB", "/UPF", - "UNDELETE", "UNDO", "/UNITS", "UNPAUSE", "UPCOORD", - "UPGEOM", "*USE", "/USER", "USRCAL", "USRDOF", - "USRELEM", "V", "V2DOPT", "VA", "*VABS", "VADD", - "VARDEL", "VARNAM", "VATT", "VCLEAR", "*VCOL", - "/VCONE", "VCROSS", "*VCUM", "VDDAM", "VDELE", "VDGL", - "VDOT", "VDRAG", "*VEC", "*VEDIT", "VEORIENT", "VEXT", - "*VFACT", "*VFILL", "VFOPT", "VFQUERY", "VFSM", - "*VFUN", "VGEN", "*VGET", "VGET", "VGLUE", "/VIEW", - "VIMP", "VINP", "VINV", "*VITRP", "*VLEN", "VLIST", - "VLSCALE", "*VMASK", "VMESH", "VOFFST", "VOLUMES") - - # list of in-built () functions - elafunf = ("NX()", "NY()", "NZ()", "KX()", "KY()", "KZ()", "LX()", - "LY()", "LZ()", "LSX()", "LSY()", "LSZ()", "NODE()", - "KP()", "DISTND()", "DISTKP()", "DISTEN()", "ANGLEN()", - "ANGLEK()", "NNEAR()", "KNEAR()", "ENEARN()", - "AREAND()", "AREAKP()", "ARNODE()", "NORMNX()", - "NORMNY()", "NORMNZ()", "NORMKX()", "NORMKY()", - "NORMKZ()", "ENEXTN()", "NELEM()", "NODEDOF()", - "ELADJ()", "NDFACE()", "NMFACE()", "ARFACE()", "UX()", - "UY()", "UZ()", "ROTX()", "ROTY()", "ROTZ()", "TEMP()", - "PRES()", "VX()", "VY()", "VZ()", "ENKE()", "ENDS()", - "VOLT()", "MAG()", "AX()", "AY()", "AZ()", - "VIRTINQR()", "KWGET()", "VALCHR()", "VALHEX()", - "CHRHEX()", "STRFILL()", "STRCOMP()", "STRPOS()", - "STRLENG()", "UPCASE()", "LWCASE()", "JOIN()", - "SPLIT()", "ABS()", "SIGN()", "CXABS()", "EXP()", - "LOG()", "LOG10()", "SQRT()", "NINT()", "MOD()", - "RAND()", "GDIS()", "SIN()", "COS()", "TAN()", - "SINH()", "COSH()", "TANH()", "ASIN()", "ACOS()", - "ATAN()", "ATAN2()") - - elafung = ("NSEL()", "ESEL()", "KSEL()", "LSEL()", "ASEL()", - "VSEL()", "NDNEXT()", "ELNEXT()", "KPNEXT()", - "LSNEXT()", "ARNEXT()", "VLNEXT()", "CENTRX()", - "CENTRY()", "CENTRZ()") - - elafunh = ("~CAT5IN", "~CATIAIN", "~PARAIN", "~PROEIN", "~SATIN", - "~UGIN", "A", "AADD", "AATT", "ABEXTRACT", "*ABBR", - "ABBRES", "ABBSAV", "ABS", "ACCAT", "ACCOPTION", - "ACEL", "ACLEAR", "ADAMS", "ADAPT", "ADD", "ADDAM", - "ADELE", "ADGL", "ADRAG", "AESIZE", "AFILLT", "AFLIST", - "AFSURF", "*AFUN", "AGEN", "AGLUE", "AINA", "AINP", - "AINV", "AL", "ALIST", "ALLSEL", "ALPHAD", "AMAP", - "AMESH", "/AN3D", "ANCNTR", "ANCUT", "ANCYC", "ANDATA", - "ANDSCL", "ANDYNA", "/ANFILE", "ANFLOW", "/ANGLE", - "ANHARM", "ANIM", "ANISOS", "ANMODE", "ANMRES", - "/ANNOT", "ANORM", "ANPRES", "ANSOL", "ANSTOAQWA", - "ANSTOASAS", "ANTIME", "ANTYPE") - - special = ("/COM", "/TITLE", "STITLE") - - elements = ("SOLID5", - "LINK11", - "PLANE13", - "COMBIN14", - "MASS2", - "PLANE25", - "MATRIX27", - "FLUID29", - "FLUID30", - "LINK31", - "LINK33", - "LINK34", - "PLANE35", - "SOURC36", - "COMBIN37", - "FLUID38", - "COMBIN39", - "COMBIN40", - "INFIN47", - "MATRIX50", - "PLANE55", - "SHELL61", - "LINK68", - "SOLID70", - "MASS71", - "PLANE75", - "PLANE77", - "PLANE78", - "PLANE83", - "SOLID87", - "SOLID90", - "CIRCU94", - "SOLID96", - "SOLID98", - "INFIN110", - "INFIN111", - "FLUID116", - "PLANE121", - "SOLID122", - "SOLID123", - "CIRCU124", - "CIRCU125", - "TRANS126", - "FLUID129", - "FLUID130", - "SHELL131", - "SHELL132", - "FLUID136", - "FLUID138", - "FLUID139", - "SURF151", - "SURF152", - "SURF153", - "SURF154", - "SURF155", - "SURF156", - "SHELL157", - "SURF159", - "TARGE169", - "TARGE170", - "CONTA172", - "CONTA174", - "CONTA175", - "CONTA177", - "CONTA178", - "PRETS179", - "LINK180", - "SHELL181", - "PLANE182", - "PLANE183", - "MPC184", - "SOLID185", - "SOLID186", - "SOLID187", - "BEAM188", - "BEAM189", - "SOLSH190", - 
"INTER192", - "INTER193", - "INTER194", - "INTER195", - "MESH200", - "FOLLW201", - "INTER202", - "INTER203", - "INTER204", - "INTER205", - "SHELL208", - "SHELL209", - "CPT212", - "CPT213", - "COMBI214", - "CPT215", - "CPT216", - "CPT217", - "FLUID218", - "FLUID220", - "FLUID221", - "PLANE222", - "PLANE223", - "SOLID225", - "SOLID226", - "SOLID227", - "PLANE230", - "SOLID231", - "SOLID232", - "PLANE233", - "SOLID236", - "SOLID237", - "PLANE238", - "SOLID239", - "SOLID240", - "HSFLD241", - "HSFLD242", - "COMBI250", - "SURF251", - "SURF252", - "INFIN257", - "REINF263", - "REINF264", - "REINF265", - "SOLID272", - "SOLID273", - "SOLID278", - "SOLID279", - "CABLE280", - "SHELL281", - "SOLID285", - "PIPE288", - "PIPE289", - "ELBOW290", - "SOLID291", - "PLANE292", - "PLANE293", - "USER300") - - tokens = { - 'root': [ - (r'[^\S\n]+', Whitespace), - (words((elafunb+elafunc+elafund+elafune+elafunh+special), suffix=r'\b'), Keyword, 'non-keyword'), - default('non-keyword'), - ], - 'non-keyword': [ - (r'!.*\n', Comment, '#pop'), - (r'%.*?%', Escape), - include('strings'), - include('nums'), - (words((elafunf+elafung), suffix=r'\b'), Name.Builtin), - (words((elements), suffix=r'\b'), Name.Property), - include('core'), - (r'AR[0-9]+', Name.Variable.Instance), - (r'[a-z_][a-z0-9_]*', Name.Variable), - (r'\n+', Whitespace, '#pop'), - (r'[^\S\n]+', Whitespace), - ], - 'core': [ - # Operators - (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=|\(|\))', Operator), - (r'/EOF', Generic.Emph), - (r'[\.(),:&;]', Punctuation), - ], - 'strings': [ - (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), - (r'[$%]', String.Symbol), - ], - 'nums': [ - (r'[+-]?\d*\.\d+([efEF][-+]?\d+)?', Number.Float), # with dot - (r'([+-]?\d+([efEF][-+]?\d+))', Number.Float), # With scientific notation - (r'\b\d+(?![.ef])', Number.Integer), # integer simple - ] - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/abap.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/abap.py deleted file mode 100644 index cdb8e9e75d31b82e37a2be9523e1b2074ebc264f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/abap.py +++ /dev/null @@ -1,28 +0,0 @@ -""" - pygments.styles.abap - ~~~~~~~~~~~~~~~~~~~~ - - ABAP workbench like style. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.style import Style -from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator - - -class AbapStyle(Style): - - styles = { - Comment: 'italic #888', - Comment.Special: '#888', - Keyword: '#00f', - Operator.Word: '#00f', - Name: '#000', - Number: '#3af', - String: '#5a2', - - Error: '#F00', - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/semantic_version/django_fields.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/semantic_version/django_fields.py deleted file mode 100644 index e5bd7eb2340cfedad7ac387869ce20bdb053f112..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/semantic_version/django_fields.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) The python-semanticversion project -# This code is distributed under the two-clause BSD License. 
- -import warnings - -import django -from django.db import models - -if django.VERSION >= (3, 0): - # See https://docs.djangoproject.com/en/dev/releases/3.0/#features-deprecated-in-3-0 - from django.utils.translation import gettext_lazy as _ -else: - from django.utils.translation import ugettext_lazy as _ - -from . import base - - -class SemVerField(models.CharField): - - def __init__(self, *args, **kwargs): - kwargs.setdefault('max_length', 200) - super(SemVerField, self).__init__(*args, **kwargs) - - def from_db_value(self, value, expression, connection, *args): - """Convert from the database format. - - This should be the inverse of self.get_prep_value() - """ - return self.to_python(value) - - def get_prep_value(self, obj): - return None if obj is None else str(obj) - - def get_db_prep_value(self, value, connection, prepared=False): - if not prepared: - value = self.get_prep_value(value) - return value - - def value_to_string(self, obj): - value = self.to_python(self.value_from_object(obj)) - return str(value) - - def run_validators(self, value): - return super(SemVerField, self).run_validators(str(value)) - - -class VersionField(SemVerField): - default_error_messages = { - 'invalid': _("Enter a valid version number in X.Y.Z format."), - } - description = _("Version") - - def __init__(self, *args, **kwargs): - self.partial = kwargs.pop('partial', False) - if self.partial: - warnings.warn( - "Use of `partial=True` will be removed in 3.0.", - DeprecationWarning, - stacklevel=2, - ) - self.coerce = kwargs.pop('coerce', False) - super(VersionField, self).__init__(*args, **kwargs) - - def deconstruct(self): - """Handle django.db.migrations.""" - name, path, args, kwargs = super(VersionField, self).deconstruct() - kwargs['partial'] = self.partial - kwargs['coerce'] = self.coerce - return name, path, args, kwargs - - def to_python(self, value): - """Converts any value to a base.Version field.""" - if value is None or value == '': - return value - if isinstance(value, base.Version): - return value - if self.coerce: - return base.Version.coerce(value, partial=self.partial) - else: - return base.Version(value, partial=self.partial) - - -class SpecField(SemVerField): - default_error_messages = { - 'invalid': _("Enter a valid version number spec list in ==X.Y.Z,>=A.B.C format."), - } - description = _("Version specification list") - - def __init__(self, *args, **kwargs): - self.syntax = kwargs.pop('syntax', base.DEFAULT_SYNTAX) - super(SpecField, self).__init__(*args, **kwargs) - - def deconstruct(self): - """Handle django.db.migrations.""" - name, path, args, kwargs = super(SpecField, self).deconstruct() - if self.syntax != base.DEFAULT_SYNTAX: - kwargs['syntax'] = self.syntax - return name, path, args, kwargs - - def to_python(self, value): - """Converts any value to a base.Spec field.""" - if value is None or value == '': - return value - if isinstance(value, base.BaseSpec): - return value - return base.BaseSpec.parse(value, syntax=self.syntax) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yaml/error.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yaml/error.py deleted file mode 100644 index b796b4dc519512c4825ff539a2e6aa20f4d370d0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yaml/error.py +++ /dev/null @@ -1,75 +0,0 @@ - -__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] - -class Mark: - - def __init__(self, name, index, line, column, buffer, pointer): - self.name = name - 
self.index = index - self.line = line - self.column = column - self.buffer = buffer - self.pointer = pointer - - def get_snippet(self, indent=4, max_length=75): - if self.buffer is None: - return None - head = '' - start = self.pointer - while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': - start -= 1 - if self.pointer-start > max_length/2-1: - head = ' ... ' - start += 5 - break - tail = '' - end = self.pointer - while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': - end += 1 - if end-self.pointer > max_length/2-1: - tail = ' ... ' - end -= 5 - break - snippet = self.buffer[start:end] - return ' '*indent + head + snippet + tail + '\n' \ - + ' '*(indent+self.pointer-start+len(head)) + '^' - - def __str__(self): - snippet = self.get_snippet() - where = " in \"%s\", line %d, column %d" \ - % (self.name, self.line+1, self.column+1) - if snippet is not None: - where += ":\n"+snippet - return where - -class YAMLError(Exception): - pass - -class MarkedYAMLError(YAMLError): - - def __init__(self, context=None, context_mark=None, - problem=None, problem_mark=None, note=None): - self.context = context - self.context_mark = context_mark - self.problem = problem - self.problem_mark = problem_mark - self.note = note - - def __str__(self): - lines = [] - if self.context is not None: - lines.append(self.context) - if self.context_mark is not None \ - and (self.problem is None or self.problem_mark is None - or self.context_mark.name != self.problem_mark.name - or self.context_mark.line != self.problem_mark.line - or self.context_mark.column != self.problem_mark.column): - lines.append(str(self.context_mark)) - if self.problem is not None: - lines.append(self.problem) - if self.problem_mark is not None: - lines.append(str(self.problem_mark)) - if self.note is not None: - lines.append(self.note) - return '\n'.join(lines) - diff --git a/spaces/pseudolab/SonGPT/core/structure/conversation.py b/spaces/pseudolab/SonGPT/core/structure/conversation.py deleted file mode 100644 index 3f1b945eeaea29cef545f07abd9da492102544c3..0000000000000000000000000000000000000000 --- a/spaces/pseudolab/SonGPT/core/structure/conversation.py +++ /dev/null @@ -1,96 +0,0 @@ -from colorama import init, Fore, Style -from typing import Dict, List - -from .memory_handler import MemoryHandler -from .llm import BaseLLM - - -class Conversation: - """ - Class for managing the coversation with BestFriendGPT - """ - - def __init__( - self, llm: BaseLLM, memory_handler: MemoryHandler, user_name: str, ai_name: str - ): - self.llm = llm - self.memory_handler = memory_handler - self.config = { - "UPDATE_INTERVAL": 5, - "MEMORY_INTERVAL": 10, - } - self.user_name = user_name - self.ai_name = ai_name - self.starter = "Hi, How are you?" 
- - def start(self): - """ - Starts the conversation with BestFriendGPT - A wrapper around self.chat - """ - self.chat(user_name=self.user_name, ai_name=self.ai_name, starter=self.starter) - return - - def format_chat_log(self, message: str, by: str) -> Dict: - """ - by is one of 'AI', 'USER' - """ - chat_log = {"by": by, "message": message} - - return chat_log - - def convert_to_string(self, chat_log: List[Dict]) -> str: - chat_log_str = "" - for chat in chat_log: - name = chat["by"] - message = chat["message"] - chat_log_str += f"{name}: {message}\n" - chat_log_str = chat_log_str.rstrip("\n") - return chat_log_str - - def chat(self, user_name: str, ai_name: str, starter: str): - """ - Chats with the BestFriendGPT - For loop for chatting, saving memories, and retrieving memories. - """ - print(f"{Fore.GREEN}{ai_name}{Style.RESET_ALL}: {starter}") - - chat_history = [] - chat_history.append(self.format_chat_log(starter, ai_name)) - user_response_n = 0 - - while True: - user_message = str(input(f"{Fore.BLUE}{user_name}{Style.RESET_ALL}: ")) - chat_history.append(self.format_chat_log(user_message, user_name)) - - ########## MemoryHandler.retrieve_memory ########## - # Should decide whether to use one message or chat_log - retrieved_memory: List[str] = self.memory_handler.retrieve_memory( - user_message - ) - ################################################### - - # Generate AI response - ai_response = self.llm.get_response( - user_name=user_name, - ai_name=ai_name, - chat_history=self.convert_to_string( - chat_history[-self.config["UPDATE_INTERVAL"] * 2 :] - ), - memory=retrieved_memory, - ) - chat_history.append(self.format_chat_log(ai_response, ai_name)) - user_response_n += 1 - - ########## MemoryHandler.save_memory ########## - if user_response_n == self.config["MEMORY_INTERVAL"]: - self.memory_handler.save_memory( - user_name=user_name, - ai_name=ai_name, - chat_history=self.convert_to_string( - chat_history[-self.config["MEMORY_INTERVAL"] * 2 :] - ), - ) - ############################################### - - print(f"{Fore.GREEN}{ai_name}{Style.RESET_ALL}: {ai_response}") diff --git a/spaces/pszemraj/document-summarization/README.md b/spaces/pszemraj/document-summarization/README.md deleted file mode 100644 index 3240c8e9b369dfab81bca2d0d77055c222718f49..0000000000000000000000000000000000000000 --- a/spaces/pszemraj/document-summarization/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Document Summarization -emoji: 🌖 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: true -license: apache-2.0 ---- - -Check out the configuration reference at - -# README - Document Summarization - -The original demo/what this repo was built for can be found [here](https://huggingface.co/spaces/pszemraj/document-summarization) - -## Usage - -If you are using this **not** as a gradio demo on hf spaces, you can run it locally with: - -```bash -python app.py --share -``` - -To see all the available arguments, run `python app.py --help`. 
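A `--share` flag like this is normally just forwarded to Gradio's launcher. The snippet below is a minimal, hypothetical sketch of that wiring; the real `app.py` builds a full document-summarization UI and may accept more options:

```python
# Minimal sketch of how a --share flag is typically passed through to Gradio.
# Illustrative only: the interface below is a placeholder, not the Space's UI.
import argparse

import gradio as gr


def build_demo() -> gr.Interface:
    # placeholder interface; the actual app builds a summarization UI
    return gr.Interface(fn=lambda text: text[:200], inputs="text", outputs="text")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true",
                        help="create a public Gradio share link")
    args = parser.parse_args()

    build_demo().launch(share=args.share)
```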
- -## Installation - -```bash -pip install -r requirements.txt -``` diff --git a/spaces/pyodide-demo/self-hosted/module.d.ts b/spaces/pyodide-demo/self-hosted/module.d.ts deleted file mode 100644 index a1e21fcd8a76c8419e5ad0dd188327816d6be0e9..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/module.d.ts +++ /dev/null @@ -1,27 +0,0 @@ -/** - * - * @param {undefined | function(): string} stdin - * @param {undefined | function(string)} stdout - * @param {undefined | function(string)} stderr - * @private - */ -export function setStandardStreams(stdin: undefined | (() => string), stdout: undefined | ((arg0: string) => any), stderr: undefined | ((arg0: string) => any)): void; -/** - * Make the home directory inside the virtual file system, - * then change the working directory to it. - * - * @param {string} path - * @private - */ -export function setHomeDirectory(path: string): void; -export type Module = any; -/** - * @typedef {import('emscripten').Module} Module - */ -/** - * The Emscripten Module. - * - * @private - * @type {Module} - */ -export let Module: any; diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Ambiera Image Size Reducer Pro 1.3.2 Incl VERIFIED Crack.md b/spaces/quidiaMuxgu/Expedit-SAM/Ambiera Image Size Reducer Pro 1.3.2 Incl VERIFIED Crack.md deleted file mode 100644 index 4e90da10c494d7825bf0a2c52ec8bacdf177c4b5..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Ambiera Image Size Reducer Pro 1.3.2 Incl VERIFIED Crack.md +++ /dev/null @@ -1,8 +0,0 @@ - -

aspectratio fix and aspectratiofix 3.0.1.11 is a powerful and easy-to-use software for correcting image distortion in digital photos. aspectratio fix and aspectratiofix 3.11 is a software for correcting image distortion in digital photos. this software is very effective and easy to use. in this software, you can adjust the width and the height of your images.you may also likexara photo & graphic designer with crack free download []

-

lazy eye fix is a powerful and easy-to-use software for correcting image distortion in digital photos. lazy eye fix is a powerful and easy-to-use software for correcting image distortion in digital photos. lazy eye fix is a very effective software that can fix image distortion in digital photos. this software is very effective and easy to use. in this software, you can adjust the width and the height of your images.you may also likexara photo & graphic designer with crack free download []

-

Ambiera Image Size Reducer Pro 1.3.2 Incl Crack


Download Zip ✑ ✑ ✑ https://geags.com/2uCrDk



-

imagefix pro is a powerful and easy-to-use software for correcting image distortion in digital photos. imagefix pro is a powerful and easy-to-use software for correcting image distortion in digital photos. imagefix pro is very effective and easy to use. in this software, you can adjust the width and the height of your images. you may also likexara photo & graphic designer with crack free download []

-

mozilla firefox 4.0 crack is a open source web browser developed by the mozilla corporation. firefox web browser is included in microsoft windows operating system as the default web browser. it is the most popular web browser used on personal computers worldwide.the firefox crack download version is the latest and stable version. it includes many new features, like the ability to add bookmarks, sync your browsing history and passwords, use tabbed browsing, view multiple pages at the same time, use pop-up blocking, and add-ons, such as the popular adblock plus. mozilla firefox 4.0 activation key is the perfect software for saving time and effort.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Autodesk Architectural Desktop 2006 3CD Crack !FREE!.md b/spaces/quidiaMuxgu/Expedit-SAM/Autodesk Architectural Desktop 2006 3CD Crack !FREE!.md deleted file mode 100644 index 426c578df30433db8eef9955a6e6ba62df22e66e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Autodesk Architectural Desktop 2006 3CD Crack !FREE!.md +++ /dev/null @@ -1,29 +0,0 @@ -
-`

How to Install and Use Autodesk Architectural Desktop 2006 on Windows 10

` -`

Autodesk Architectural Desktop 2006 is a software that allows you to create and edit architectural drawings and models. It was released in 2005 and is no longer supported by Autodesk. However, if you still have a valid license and installation package, you may be able to run it on Windows 10 with some tweaks and adjustments. Here are some steps you can follow to install and use Autodesk Architectural Desktop 2006 on Windows 10.

` -`
    ` -`
  1. Before you install Autodesk Architectural Desktop 2006, make sure you have the latest updates for Windows 10. You can check for updates by going to Settings > Update & Security > Windows Update.
  2. ` -`
  3. Next, you need to set the compatibility mode for the installation package. Right-click on the setup.exe file and select Properties. Go to the Compatibility tab and check the box that says "Run this program in compatibility mode for". Choose Windows XP (Service Pack 3) from the drop-down menu. Click Apply and OK.
  4. ` -`
  5. Now you can run the setup.exe file as an administrator. Follow the instructions on the screen to install Autodesk Architectural Desktop 2006. You may need to enter your serial number and product key during the installation.
  6. ` -`
  7. After the installation is complete, you need to set the compatibility mode for the program executable. Right-click on the acad.exe file (usually located in C:\Program Files\Autodesk\Architectural Desktop 2006) and select Properties. Go to the Compatibility tab and check the box that says "Run this program in compatibility mode for". Choose Windows XP (Service Pack 3) from the drop-down menu. Also check the box that says "Run this program as an administrator". Click Apply and OK.
  8. ` -`
  9. You can now launch Autodesk Architectural Desktop 2006 from the Start menu or desktop shortcut. You may see some warning messages or errors, but you can ignore them or click OK. The program should run normally on Windows 10.
  10. ` -`
` -`

Note: This method is not guaranteed to work for everyone, as some features or functions may not be compatible with Windows 10. If you encounter any problems or issues, you may need to upgrade to a newer version of Autodesk software or contact your vendor for support.
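If you would rather script the compatibility settings from the setup.exe and acad.exe steps above than click through the Properties dialog, Windows keeps those choices as per-user registry values. The sketch below is only an illustration (Windows-only, and it assumes the default install path mentioned in the steps; adjust the path for your system):

```python
# Hypothetical helper: writes the same per-user compatibility flags that the
# Properties dialog sets for acad.exe ("WINXPSP3" = Windows XP SP3 mode,
# "RUNASADMIN" = always run elevated). Windows-only; uses only the standard library.
import winreg

ADT_EXE = r"C:\Program Files\Autodesk\Architectural Desktop 2006\acad.exe"  # default path from the steps above
LAYERS_KEY = r"Software\Microsoft\Windows NT\CurrentVersion\AppCompatFlags\Layers"

with winreg.CreateKey(winreg.HKEY_CURRENT_USER, LAYERS_KEY) as key:
    winreg.SetValueEx(key, ADT_EXE, 0, winreg.REG_SZ, "WINXPSP3 RUNASADMIN")

print(f"Compatibility flags set for {ADT_EXE}")
```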

-

Autodesk Architectural Desktop 2006 3CD crack


DOWNLOAD ✪✪✪ https://geags.com/2uCsQt



` - -`

Autodesk Architectural Desktop 2006 is a powerful software that can help you create and edit architectural drawings and models. It has many features and tools that can enhance your productivity and creativity. Some of the features include:

` -`
    ` -`
  • Object-based design: You can create and modify objects such as walls, doors, windows, roofs, stairs, and more. You can also apply styles and properties to these objects to customize their appearance and behavior.
  • ` -`
  • Project management: You can organize your drawings and models into projects and subprojects. You can also use project standards to ensure consistency and quality across your project.
  • ` -`
  • Documentation: You can generate and update annotations, dimensions, schedules, tables, and other documentation elements automatically from your drawings and models. You can also use layers, layouts, viewports, and plotting to control the display and output of your documentation.
  • ` -`
  • Collaboration: You can share your drawings and models with other users and applications using various formats and methods. You can also use external references, data links, and data extraction to link your drawings and models to external sources of information.
  • ` -`
` -`

Autodesk Architectural Desktop 2006 is a software that can help you create and edit architectural drawings and models. However, it is also an old software that is no longer supported by Autodesk. Therefore, you may encounter some challenges and limitations when using it on Windows 10. Some of the challenges and limitations include:

` -`
    ` -`
  • Compatibility: As mentioned earlier, you need to set the compatibility mode for both the installation package and the program executable to run Autodesk Architectural Desktop 2006 on Windows 10. This may not work for everyone or for every feature or function of the software.
  • ` -`
  • Security: Autodesk Architectural Desktop 2006 may not have the latest security patches or updates to protect your system from viruses, malware, or other threats. You may also need to disable some security features or settings on Windows 10 to allow the software to run properly.
  • ` -`
  • Performance: Autodesk Architectural Desktop 2006 may not run as fast or as smoothly as newer versions of Autodesk software on Windows 10. You may experience some lagging, crashing, freezing, or other errors when using the software.
  • ` -`
  • Support: Autodesk Architectural Desktop 2006 is no longer supported by Autodesk. This means that you will not receive any technical support or customer service from Autodesk if you encounter any problems or issues with the software. You may also have difficulty finding online resources or forums to help you with the software.
  • ` -`
`

d5da3c52bf
-
-
\ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Download HOT HAL7600 V.1.2 Windows 7.md b/spaces/quidiaMuxgu/Expedit-SAM/Download HOT HAL7600 V.1.2 Windows 7.md deleted file mode 100644 index e74492f8c80a6b7cb911120f01a081798c10051a..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Download HOT HAL7600 V.1.2 Windows 7.md +++ /dev/null @@ -1,8 +0,0 @@ - -

HAL7600 V.1.2 Windows 7 ISO & WAT fix fix software removal tool remove requires 1GB of disk space. The tool consists of a typical Windows- 7 activation related file library. You can use this tool to remove activators from your Windows 7, such as remove, HAL7600, Chew7, Rodin, etc. You can use the tool to configure the required system files, including organizing VBS data and making other necessary decisions.

-

Download HAL7600 V.1.2 Windows 7


Download ✓✓✓ https://geags.com/2uCrjK



-

HAL7600 V.1.2 Windows 7 ISO & WAT fix software removal tool remove is currently being developed and there is no pre-release version. This tool depends on the release version number of the file. Your results may vary.

-

HAL7600 V.1.2 Windows 7 ISO & WAT fix software removal tool remove requires 1GB of disk space. The tool consists of a typical Windows- 7 activation related file library. You can use this tool to remove activators from your Windows 7, such as remove, HAL7600, Chew7, Rodin, etc. You can use the tool to configure the required system files, including organizing VBS data and making other necessary decisions.

-

RemoveWAT.exe remove window w10 removes activation on the Windows 10 operating system. There is no need to use the product key anymore because you can remove the activation and de-activate the operating system. RemoveWAT has most of the quality and standards of Microsoft after activation and data removal. It has all the tools needed to make it easy and convenient to remove your Microsoft activation, the activation key is removed from the registry. To de-activate the operating system, remove the WAT data from the Microsoft Active Directory. RemoveWAT is a completely free tool but was not developed by Microsoft, so it will not be asked for the Microsoft key.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Dts Audio Driver Download [EXCLUSIVE].md b/spaces/quidiaMuxgu/Expedit-SAM/Dts Audio Driver Download [EXCLUSIVE].md deleted file mode 100644 index 4d4813ea2f021b91a27073f4ed1ff8603523b57d..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Dts Audio Driver Download [EXCLUSIVE].md +++ /dev/null @@ -1,8 +0,0 @@ -

Dts Audio Driver Download


Download ……… https://geags.com/2uCrQX



DTS Audio Driver 1.10.4.0 packages are available for Windows 7, 8, 10, and 11 (64-bit).
-
-
-

diff --git a/spaces/raaec/Pix2Pix-Video-prv/app.py b/spaces/raaec/Pix2Pix-Video-prv/app.py deleted file mode 100644 index aace586f6434819ddb6b66cd2dd34cfdcc71ebe5..0000000000000000000000000000000000000000 --- a/spaces/raaec/Pix2Pix-Video-prv/app.py +++ /dev/null @@ -1,222 +0,0 @@ -import gradio as gr -import os -import cv2 -import numpy as np -from moviepy.editor import * -from share_btn import community_icon_html, loading_icon_html, share_js - -from diffusers import StableDiffusionInstructPix2PixPipeline -import torch -from PIL import Image, ImageOps -import time -import psutil -import math -import random - - -pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None) - -device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶" - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - - -def pix2pix( - input_image: Image.Image, - instruction: str, - steps: int, - seed: int, - text_cfg_scale: float, - image_cfg_scale: float, - ): - - width, height = input_image.size - factor = 512 / max(width, height) - factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height) - width = int((width * factor) // 64) * 64 - height = int((height * factor) // 64) * 64 - input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS) - - if instruction == "": - return [input_image, seed] - - generator = torch.manual_seed(seed) - edited_image = pipe( - instruction, image=input_image, - guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale, - num_inference_steps=steps, generator=generator, - ).images[0] - print(f"EDITED: {edited_image}") - return edited_image - - - -def get_frames(video_in): - frames = [] - #resize the video - clip = VideoFileClip(video_in) - - #check fps - if clip.fps > 30: - print("vide rate is over 30, resetting to 30") - clip_resized = clip.resize(height=512) - clip_resized.write_videofile("video_resized.mp4", fps=30) - else: - print("video rate is OK") - clip_resized = clip.resize(height=512) - clip_resized.write_videofile("video_resized.mp4", fps=clip.fps) - - print("video resized to 512 height") - - # Opens the Video file with CV2 - cap= cv2.VideoCapture("video_resized.mp4") - - fps = cap.get(cv2.CAP_PROP_FPS) - print("video fps: " + str(fps)) - i=0 - while(cap.isOpened()): - ret, frame = cap.read() - if ret == False: - break - cv2.imwrite('kang'+str(i)+'.jpg',frame) - frames.append('kang'+str(i)+'.jpg') - i+=1 - - cap.release() - cv2.destroyAllWindows() - print("broke the video into frames") - - return frames, fps - - -def create_video(frames, fps): - print("building video result") - clip = ImageSequenceClip(frames, fps=fps) - clip.write_videofile("movie.mp4", fps=fps) - - return 'movie.mp4' - - -def infer(prompt,video_in, seed_in, trim_value): - print(prompt) - break_vid = get_frames(video_in) - - frames_list= break_vid[0] - fps = break_vid[1] - n_frame = int(trim_value*fps) - - if n_frame >= len(frames_list): - print("video is shorter than the cut value") - n_frame = len(frames_list) - - result_frames = [] - print("set stop frames to: " + str(n_frame)) - - for i in frames_list[0:int(n_frame)]: - pil_i = Image.open(i).convert("RGB") - - pix2pix_img = pix2pix(pil_i, prompt, 50, seed_in, 7.5, 1.5) - #print(pix2pix_img) - #image = Image.open(pix2pix_img) - #rgb_im = image.convert("RGB") - - # exporting the image - pix2pix_img.save(f"result_img-{i}.jpg") - result_frames.append(f"result_img-{i}.jpg") - print("frame " + i + "/" + str(n_frame) + ": 
done;") - - final_vid = create_video(result_frames, fps) - print("finished !") - - return final_vid, gr.Group.update(visible=True) - -title = """ -
-
-

- Pix2Pix Video -

-
-

- Apply Instruct Pix2Pix Diffusion to a video -

-
-""" - -article = """ - - -
-

You may also like:

-
- - - - - - - -
- -
- -""" - -with gr.Blocks(css='style.css') as demo: - with gr.Column(elem_id="col-container"): - gr.HTML(title) - with gr.Row(): - with gr.Column(): - video_inp = gr.Video(label="Video source", source="upload", type="filepath", elem_id="input-vid") - prompt = gr.Textbox(label="Prompt", placeholder="enter prompt", show_label=False, elem_id="prompt-in") - with gr.Row(): - seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456) - trim_in = gr.Slider(label="Cut video at (s)", minimun=1, maximum=5, step=1, value=1) - with gr.Column(): - video_out = gr.Video(label="Pix2pix video result", elem_id="video-output") - gr.HTML(""" - Duplicate Space - work with longer videos / skip the queue: - """, elem_id="duplicate-container") - submit_btn = gr.Button("Generate Pix2Pix video") - - with gr.Group(elem_id="share-btn-container", visible=False) as share_group: - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - inputs = [prompt,video_inp,seed_inp, trim_in] - outputs = [video_out, share_group] - - #ex = gr.Examples( - # [ - # ["Make it a marble sculpture", "./examples/pexels-jill-burrow-7665249_512x512.mp4", 422112651, 4], - # ["Make it molten lava", "./examples/Ocean_Pexels_ 8953474_512x512.mp4", 43571876, 4] - # ], - # inputs=inputs, - # outputs=outputs, - # fn=infer, - # cache_examples=True, - #) - - gr.HTML(article) - - submit_btn.click(infer, inputs, outputs) - share_button.click(None, [], [], _js=share_js) - - - -demo.queue(max_size=12).launch() diff --git a/spaces/ramkamal2000/voice-conversion-ddp/commons.py b/spaces/ramkamal2000/voice-conversion-ddp/commons.py deleted file mode 100644 index fc384912618494475bda9d68fa76530f4fe2a27b..0000000000000000000000000000000000000000 --- a/spaces/ramkamal2000/voice-conversion-ddp/commons.py +++ /dev/null @@ -1,171 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def rand_spec_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = 
sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Activaradobeaftereffectscccrack !!INSTALL!!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Activaradobeaftereffectscccrack !!INSTALL!!.md deleted file mode 100644 index 1b5ddb04608ff7b4dab1e60f1964d12c2cb6c6f0..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Activaradobeaftereffectscccrack !!INSTALL!!.md +++ /dev/null @@ -1,121 +0,0 @@ - -

Activar Adobe After Effects CC Crack: Cómo Hacerlo y Qué Beneficios Tiene

- -

Adobe After Effects CC es uno de los programas más populares y potentes para la creación de efectos visuales y gráficos en movimiento. Con este software, puedes dar vida a tus vídeos con animaciones, transiciones, textos, luces, sombras y mucho más. Adobe After Effects CC es el software líder en el sector de la composición y la animación utilizado por los profesionales de los efectos visuales y los gráficos en movimiento en todo el mundo. El programa se puede utilizar para el seguimiento, la composición, el croma y la animación. Además, también funciona como transcodificador de medios, editor de vídeo no lineal y editor de audio.

- -

Adobe After Effects CC es un programa de pago que requiere una suscripción mensual o anual para poder acceder a todas sus funciones y actualizaciones. Sin embargo, existe una forma de activar Adobe After Effects CC crack y disfrutar de todas sus ventajas sin tener que pagar nada. Se trata de un archivo modificado que se utiliza para saltarse la verificación de licencia del programa y acceder a él sin restricciones. En este artículo te vamos a explicar cómo activar Adobe After Effects CC crack, qué beneficios tiene y qué riesgos implica. Sigue leyendo y descubre todo lo que necesitas saber sobre este tema.

-

activaradobeaftereffectscccrack


DOWNLOADhttps://urlgoal.com/2uCLbc



- -

¿Qué es Adobe After Effects CC crack?

- -

Adobe After Effects CC crack es un archivo modificado que se utiliza para activar el programa sin necesidad de una licencia oficial. Al usar Adobe After Effects CC crack, podrás acceder a todas las funciones del programa sin restricciones y sin tener que pagar nada. Adobe After Effects CC crack suele estar disponible en forma de parche o keygen, que son programas que generan un código de activación o modifican el archivo original del programa para saltarse la verificación de licencia.

- -

Adobe After Effects CC crack se puede descargar de forma gratuita desde sitios web especializados en software pirata, como Torrent4You o YASIR252. Estos sitios suelen ofrecer diferentes versiones del programa con el crack incluido o por separado, así como instrucciones detalladas sobre cómo instalarlo y usarlo.

- -

¿Cómo activar Adobe After Effects CC crack?

- -

El proceso para activar Adobe After Effects CC crack puede variar según la versión del programa y el tipo de crack que se utilice. Sin embargo, los pasos generales suelen ser los siguientes:

- -
    -
  • Descarga Adobe After Effects CC crack desde el sitio web que prefieras.
  • -
  • Extrae el archivo .rar o .zip que has descargado con un programa como WinRAR o 7-Zip.
  • -
  • Abre la carpeta extraída y busca el archivo .exe del programa.
  • -
  • Doble clic sobre él y sigue las instrucciones en pantalla para instalar el programa.
  • -
  • Copia el archivo .dll del crack y pégalo en la carpeta de instalación del programa, reemplazando el original.
  • -
  • O bien, ejecuta el archivo .exe del keygen y genera un código de activación.
  • -
  • Abre el programa e introduce el código de activación cuando te lo pida.
  • -
  • Disfruta de Adobe After Effects CC crack con todas sus funciones.
  • -
- -

¿Qué beneficios tiene activar Adobe After Effects CC crack?

- -

Activar Adobe After Effects CC crack tiene algunos beneficios que podrían interesarte. En primer lugar, podrás ahorrar mucho dinero y usar tus recursos para otras cosas. En lugar de pagar una suscripción mensual o anual por el programa, podrás usarlo gratis y sin limitaciones. En segundo lugar, podrás descargar el programa en cualquier momento y desde cualquier lugar, sin tener que esperar la disponibilidad física o digital del programa. Además, podrás disfrutar del programa sin limitaciones de tiempo o de acceso, y sin tener que soportar posibles problemas técnicos o de compatibilidad.

- -

Otro beneficio de activar Adobe After Effects CC crack es que podrás acceder a las últimas novedades y mejoras del programa. Adobe After Effects CC se actualiza constantemente con nuevas funciones, correcciones de errores y optimizaciones de rendimiento. Al activar Adobe After Effects CC crack, podrás disfrutar de todas estas ventajas sin tener que esperar a que se lancen oficialmente o a que se actualice tu suscripción.

- -

¿Qué riesgos implica activar Adobe After Effects CC crack?

- -

Activar Adobe After Effects CC crack también implica algunos riesgos que debes tener en cuenta antes de hacerlo. En primer lugar, activar Adobe After Effects CC crack es ilegal y viola los derechos de autor de los desarrolladores y los editores del programa. Esto significa que podrías enfrentarte a sanciones legales o a acciones por parte de los propietarios del programa. En segundo lugar, activar Adobe After Effects CC crack puede exponer tu ordenador a virus, malware u otros programas dañinos que pueden infectar tu sistema o robar tus datos personales. Esto significa que debes tener mucho cuidado al elegir fuentes seguras y fiables para descargar el programa y usar un buen antivirus para proteger tu ordenador. En tercer lugar, activar Adobe After Effects CC crack puede comprometer la calidad y la funcionalidad del programa en sí. Esto significa que podrías tener problemas con la instalación, el inicio, la gráfica, el audio, el juego o el multijugador del programa. Esto significa que debes estar preparado para resolver posibles errores o fallos que puedas encontrar durante el uso del programa.

- -

Conclusión

- -

Adobe After Effects CC es un programa increíble para la creación de efectos visuales y gráficos en movimiento. Si quieres activar Adobe After Effects CC crack y disfrutar de todas sus ventajas sin pagar nada, te hemos explicado en este artículo cómo hacerlo y cuáles son los riesgos y los beneficios que implica. Esperamos que este artículo te haya sido útil y te invitamos a dejar un comentario con tu opinión o tus dudas sobre el tema. ¡Que te diviertas con Adobe After Effects CC!

-

¿Qué alternativas existen a activar Adobe After Effects CC crack?

- -

Si no quieres arriesgarte a activar Adobe After Effects CC crack, pero tampoco quieres pagar una suscripción por el programa, existen algunas alternativas que puedes considerar. Una de ellas es usar la versión de prueba gratuita del programa, que te permite acceder a todas las funciones del programa durante 7 días sin restricciones ni compromisos. Para usar la versión de prueba gratuita del programa, solo tienes que crear una cuenta gratuita en la página web oficial de Adobe e iniciar sesión con ella en el programa. Otra alternativa es usar programas similares a Adobe After Effects CC, pero gratuitos o más baratos. Algunos ejemplos son Blender, HitFilm Express o DaVinci Resolve. Estos programas te ofrecen funciones parecidas a las de Adobe After Effects CC, pero sin coste alguno o con un precio más asequible.

-

- -

¿Cómo usar Adobe After Effects CC crack para crear efectos visuales increíbles?

- -

Una vez que hayas activado Adobe After Effects CC crack, podrás usar el programa para crear efectos visuales y gráficos en movimiento increíbles para tus vídeos. Para ello, tendrás que seguir algunos pasos básicos:

- -
    -
  • Abre el programa y crea un nuevo proyecto o abre uno existente.
  • -
  • Importa los archivos de vídeo, audio o imagen que quieras usar en tu composición.
  • -
  • Añade capas a tu composición y ajusta sus propiedades, como la posición, la escala, la rotación o la opacidad.
  • -
  • Aplica efectos a tus capas desde el panel de efectos y ajusta sus parámetros desde el panel de controles de efectos.
  • -
  • Anima tus capas usando fotogramas clave o expresiones en el panel de línea de tiempo.
  • -
  • Añade texto, formas o máscaras a tus capas desde el panel de herramientas.
  • -
  • Previsualiza tu composición desde el panel de composición y haz los cambios que necesites.
  • -
  • Exporta tu composición desde el panel de cola de procesamiento o desde el menú archivo.
  • -
- -

¿Qué consejos y trucos puedes seguir para mejorar tus habilidades con Adobe After Effects CC crack?

- -

Si quieres mejorar tus habilidades con Adobe After Effects CC crack y crear efectos visuales y gráficos en movimiento más impresionantes, puedes seguir algunos consejos y trucos que te ayudarán a optimizar tu flujo de trabajo y a sacarle el máximo partido al programa. Algunos de ellos son:

- -
    -
  • Aprende a usar los atajos de teclado para acceder rápidamente a las funciones más comunes del programa.
  • -
  • Usa las guías y las cuadrículas para alinear y distribuir tus capas con precisión.
  • -
  • Usa las precomposiciones para organizar y agrupar tus capas y aplicarles efectos o animaciones comunes.
  • -
  • Usa las capas de ajuste para aplicar efectos globales a tu composición sin afectar a las capas individuales.
  • -
  • Usa las capas nulas para controlar otras capas mediante expresiones o vinculaciones.
  • -
  • Usa los marcadores para añadir notas o comentarios a tu línea de tiempo o para sincronizar tus animaciones con el audio.
  • -
  • Usa las plantillas de animación para guardar y reutilizar tus animaciones favoritas.
  • -
  • Usa los scripts y los plugins para ampliar las funciones y las posibilidades del programa.
  • -
- -

Conclusión

- -

Adobe After Effects CC es un programa fantástico para la creación de efectos visuales y gráficos en movimiento. Si quieres activar Adobe After Effects CC crack y disfrutar de todas sus ventajas sin pagar nada, te hemos explicado en este artículo cómo hacerlo y cuáles son los riesgos, los beneficios y las alternativas que existen. También te hemos dado algunos consejos y trucos para mejorar tus habilidades con el programa y crear efectos visuales increíbles. Esperamos que este artículo te haya sido útil y te invitamos a dejar un comentario con tu opinión o tus dudas sobre el tema. ¡Que te diviertas con Adobe After Effects CC!

-

¿Qué requisitos necesita tu ordenador para activar Adobe After Effects CC crack?

- -

Para activar Adobe After Effects CC crack y usar el programa sin problemas, tu ordenador debe cumplir unos requisitos mínimos de hardware y software. Estos requisitos pueden variar según la versión del programa y el tipo de crack que uses, pero en general son los siguientes:

- -
    -
  • Sistema operativo: Windows 10 de 64 bits.
  • -
  • Procesador: Intel Core i3 o AMD equivalente con al menos 2 núcleos y 3 GHz de velocidad.
  • -
  • Memoria RAM: 8 GB como mínimo, 16 GB o más recomendados.
  • -
  • Disco duro: 40 GB de espacio libre como mínimo, SSD recomendado.
  • -
  • Tarjeta gráfica: NVIDIA o AMD con al menos 2 GB de VRAM y compatibilidad con OpenGL 2.0.
  • -
  • Resolución de pantalla: 1280 x 800 como mínimo, Full HD o superior recomendado.
  • -
  • Conexión a internet: necesaria para la descarga, la instalación y la activación del programa.
  • -
- -

Si tu ordenador no cumple estos requisitos, es posible que no puedas activar Adobe After Effects CC crack o que el programa no funcione correctamente. En ese caso, te recomendamos que actualices tu ordenador o que busques otra alternativa al programa.

- -

¿Qué proyectos puedes crear con Adobe After Effects CC crack?

- -

Con Adobe After Effects CC crack podrás crear una gran variedad de proyectos de efectos visuales y gráficos en movimiento para tus vídeos. Algunos ejemplos de lo que puedes hacer con este programa son:

- -
    -
  • Crear títulos animados y dinámicos para tus vídeos.
  • -
  • Añadir efectos especiales como explosiones, fuego, rayos, nieve o partículas a tus vídeos.
  • -
  • Simular elementos 3D como objetos, escenarios o personajes en tus vídeos.
  • -
  • Hacer seguimiento de movimiento y reemplazar fondos o elementos en tus vídeos.
  • -
  • Crear transiciones originales y fluidas entre escenas o planos en tus vídeos.
  • -
  • Mejorar el color, el contraste, la iluminación o el sonido de tus vídeos.
  • -
  • Crear animaciones de personajes, logotipos o infografías para tus vídeos.
  • -
  • Crear vídeos interactivos o inmersivos con realidad aumentada o virtual.
  • -
- -

Estos son solo algunos ejemplos de lo que puedes hacer con Adobe After Effects CC crack, pero las posibilidades son infinitas. Solo tienes que usar tu imaginación y tu creatividad para crear proyectos únicos y sorprendentes con este programa.

- -

Conclusión

- -

Adobe After Effects CC es un programa fantástico para la creación de efectos visuales y gráficos en movimiento. Si quieres activar Adobe After Effects CC crack y disfrutar de todas sus ventajas sin pagar nada, te hemos explicado en este artículo cómo hacerlo y cuáles son los riesgos, los beneficios y las alternativas que existen. También te hemos dado algunos consejos y trucos para mejorar tus habilidades con el programa y crear efectos visuales increíbles. Esperamos que este artículo te haya sido útil y te invitamos a dejar un comentario con tu opinión o tus dudas sobre el tema. ¡Que te diviertas con Adobe After Effects CC!

-

Conclusión

- -

Adobe After Effects CC es un programa increíble para la creación de efectos visuales y gráficos en movimiento. Si quieres activar Adobe After Effects CC crack y disfrutar de todas sus ventajas sin pagar nada, te hemos explicado en este artículo cómo hacerlo y cuáles son los riesgos, los beneficios y las alternativas que existen. También te hemos dado algunos consejos y trucos para mejorar tus habilidades con el programa y crear efectos visuales increíbles. Esperamos que este artículo te haya sido útil y te invitamos a dejar un comentario con tu opinión o tus dudas sobre el tema. ¡Que te diviertas con Adobe After Effects CC!

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ashampoo Burning Studio V8.08 Portable Free Download UPDATED.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ashampoo Burning Studio V8.08 Portable Free Download UPDATED.md deleted file mode 100644 index 27086246caf3db611c15135a67da61c3ce62fc5a..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ashampoo Burning Studio V8.08 Portable Free Download UPDATED.md +++ /dev/null @@ -1,9 +0,0 @@ - -

With its multi-format support, a vast array of disc types, various graphic formats, and a varied selection of additional features, Burning Studio is widely used by beginners and experts alike. Besides the ability to burn bootable media, it also includes an audio editor, a video editor, and disc tracing to create a DVD menu, making it a reliable application with support for a multitude of features and categories.

-

There are other great features, some of which are even exclusive to Ashampoo Burning Studio. Highlights include multi-file compression, its unique integrated cover editor, complete handling of e-disc discs, drag-and-drop ripper integration, disc spanning, and even free upgrades.

-

Ashampoo Burning Studio v8.08 Portable free download


Download File ———>>> https://urlgoal.com/2uCMqR



-

Burning Studio's professional features are just a few of the hundreds available, all of which can be accessed from the integrated start menu or by opening menus directly with just a few mouse clicks. The setup wizard will help you start Burning Studio in the easiest way possible.

-

Burning Studio v8.08 features an import option that automatically identifies and downloads metadata, creates and corrects a track list, and, if installed, also creates previews for discs.

-

Burning Studio is truly the app to suit your burning needs. You can burn to audio CDs, data CDs, video CDs, DVDs, hard drives or solid-state drives, and Blu-ray discs. It supports a wide selection of burning technologies such as Sony's mini CD and M-DISC, TDK's MDSCD, Mitsubishi's UDMA/M-DISC, and BD-R. In addition, Burning Studio supports a wide range of audio and video file types and can use a number of advanced features such as record and playback, D2D/D2D-X, jitter reduction, and gapless playback.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bupena Kelas 5 Sd Pdf 71.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bupena Kelas 5 Sd Pdf 71.md deleted file mode 100644 index 34de08d71434977881e5c9f3a5bd695c9601e26a..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bupena Kelas 5 Sd Pdf 71.md +++ /dev/null @@ -1,6 +0,0 @@ - -

Download Bupena 3c hal. It is possible to download music for free, but you will need a high-speed internet connection. If you're looking to download music for free, there are several things that you need to consider. First, be sure the program you choose to download is free and that it's compatible with the platform you're using. This will let you download your files anywhere you'd like to. If you're not certain. To download the MP3 of kunci jawaban bupena 2d tema 8 sub tema 3 hal 141 162 ayo berlatih latihan soal ayra, simply follow kunci jawaban bupena 2d tema 8 sub tema 3 hal 141 162 ayo berlatih latihan soal ayra mp3. If you're interested in downloading MP3 music for free, there are a number of things that you need to consider. First of all, make sure the program costs nothing. To download the MP3 of bupena 3c hal 147, simply follow bupena 3c hal 147 mp3. If you're looking to download MP3 songs at no cost, there are several things you need to think about. First, be sure that the program you choose to download is free and that it's compatible with the platform you're using.

-

Download Bupena 3c hal. It is possible to download music for free, but you will need a high-speed internet connection. If you're looking to download music for free, there are several things that you need to consider. First, be sure the program you choose to download is free and that it's compatible with the platform you're using. This will let you download your files anywhere you'd like to. If you're not certain.

-

bupena kelas 5 sd pdf 71


DOWNLOADhttps://urlgoal.com/2uCLIp



899543212b
-
-
\ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Elipse Scada Hardkey [2021] Crackl.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Elipse Scada Hardkey [2021] Crackl.md deleted file mode 100644 index 8655a2465669a779b5aa01a98d61086176aa8027..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Elipse Scada Hardkey [2021] Crackl.md +++ /dev/null @@ -1,6 +0,0 @@ -

Elipse Scada Hardkey Crackl


DOWNLOAD 🗸 https://urlgoal.com/2uCJAb



-
-[Users choice] elipse scada hardkey crack · free download ojdbc14.jar for oracle 11g · Eguasoft Handball Scoreboard 2.4.0.0 Crack.rar · cherub ... 4d29de3e1b
-
-
-

diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Handycache Full.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Handycache Full.md deleted file mode 100644 index 26adc7d4ded34245d208bd33d9c9264c808b0205..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Handycache Full.md +++ /dev/null @@ -1,6 +0,0 @@ -

handycache full


DOWNLOADhttps://urlgoal.com/2uCJp0



- -Download Handycache. HandyCache: go to the router and enable its internal proxy, enter the parent proxy IP and its port in HC, and disable the cache ... 1fdad05405
-
-
-

diff --git a/spaces/reha/Stick_Tech/preprocess_flist_config.py b/spaces/reha/Stick_Tech/preprocess_flist_config.py deleted file mode 100644 index 927dea890c0057063080b48edc6dd8c2588c6e27..0000000000000000000000000000000000000000 --- a/spaces/reha/Stick_Tech/preprocess_flist_config.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import argparse -from tqdm import tqdm -from random import shuffle -import json -config_template = { - "train": { - "log_interval": 200, - "eval_interval": 1000, - "seed": 1234, - "epochs": 10000, - "learning_rate": 2e-4, - "betas": [0.8, 0.99], - "eps": 1e-9, - "batch_size": 12, - "fp16_run": False, - "lr_decay": 0.999875, - "segment_size": 17920, - "init_lr_ratio": 1, - "warmup_epochs": 0, - "c_mel": 45, - "c_kl": 1.0, - "use_sr": True, - "max_speclen": 384, - "port": "8001" - }, - "data": { - "training_files":"filelists/train.txt", - "validation_files":"filelists/val.txt", - "max_wav_value": 32768.0, - "sampling_rate": 32000, - "filter_length": 1280, - "hop_length": 320, - "win_length": 1280, - "n_mel_channels": 80, - "mel_fmin": 0.0, - "mel_fmax": None - }, - "model": { - "inter_channels": 192, - "hidden_channels": 192, - "filter_channels": 768, - "n_heads": 2, - "n_layers": 6, - "kernel_size": 3, - "p_dropout": 0.1, - "resblock": "1", - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - "upsample_rates": [10,8,2,2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16,16,4,4], - "n_layers_q": 3, - "use_spectral_norm": False, - "gin_channels": 256, - "ssl_dim": 256, - "n_speakers": 0, - }, - "spk":{ - "nen": 0, - "paimon": 1, - "yunhao": 2 - } -} - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list") - parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list") - parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list") - parser.add_argument("--source_dir", type=str, default="./dataset/32k", help="path to source dir") - args = parser.parse_args() - - train = [] - val = [] - test = [] - idx = 0 - spk_dict = {} - spk_id = 0 - for speaker in tqdm(os.listdir(args.source_dir)): - spk_dict[speaker] = spk_id - spk_id += 1 - wavs = [os.path.join(args.source_dir, speaker, i)for i in os.listdir(os.path.join(args.source_dir, speaker))] - wavs = [i for i in wavs if i.endswith("wav")] - shuffle(wavs) - train += wavs[2:-10] - val += wavs[:2] - test += wavs[-10:] - n_speakers = len(spk_dict.keys())*2 - shuffle(train) - shuffle(val) - shuffle(test) - - print("Writing", args.train_list) - with open(args.train_list, "w") as f: - for fname in tqdm(train): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.val_list) - with open(args.val_list, "w") as f: - for fname in tqdm(val): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.test_list) - with open(args.test_list, "w") as f: - for fname in tqdm(test): - wavpath = fname - f.write(wavpath + "\n") - - config_template["model"]["n_speakers"] = n_speakers - config_template["spk"] = spk_dict - print("Writing configs/config.json") - with open("configs/config.json", "w") as f: - json.dump(config_template, f, indent=2) diff --git a/spaces/riccorl/relik-entity-linking/relik/reader/trainer/predict.py b/spaces/riccorl/relik-entity-linking/relik/reader/trainer/predict.py deleted file mode 100644 index 
3801bef958f9a092f8d094d2e99fe476fd4caed9..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/reader/trainer/predict.py +++ /dev/null @@ -1,57 +0,0 @@ -import argparse -from pprint import pprint -from typing import Optional - -from relik.reader.relik_reader import RelikReader -from relik.reader.utils.strong_matching_eval import StrongMatching - - -def predict( - model_path: str, - dataset_path: str, - token_batch_size: int, - is_eval: bool, - output_path: Optional[str], -) -> None: - relik_reader = RelikReader(model_path) - predicted_samples = relik_reader.link_entities( - dataset_path, token_batch_size=token_batch_size - ) - if is_eval: - eval_dict = StrongMatching()(predicted_samples) - pprint(eval_dict) - if output_path is not None: - with open(output_path, "w") as f: - for sample in predicted_samples: - f.write(sample.to_jsons() + "\n") - - -def parse_arg() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-path", - required=True, - ) - parser.add_argument("--dataset-path", "-i", required=True) - parser.add_argument("--is-eval", action="store_true") - parser.add_argument( - "--output-path", - "-o", - ) - parser.add_argument("--token-batch-size", default=4096) - return parser.parse_args() - - -def main(): - args = parse_arg() - predict( - args.model_path, - args.dataset_path, - token_batch_size=args.token_batch_size, - is_eval=args.is_eval, - output_path=args.output_path, - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/rinme/vits-models/app.py b/spaces/rinme/vits-models/app.py deleted file mode 100644 index 31cdc30680f88fe0a9a7e96575218eeeca606ad1..0000000000000000000000000000000000000000 --- a/spaces/rinme/vits-models/app.py +++ /dev/null @@ -1,290 +0,0 @@ -# coding=utf-8 -import os -import re -import argparse -import utils -import commons -import json -import torch -import gradio as gr -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from torch import no_grad, LongTensor -import gradio.processing_utils as gr_processing_utils -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -hps_ms = utils.get_hparams_from_file(r'config/config.json') - -audio_postprocess_ori = gr.Audio.postprocess - -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) - - -gr.Audio.postprocess = audio_postprocess - -def get_text(text, hps, is_symbol): - text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def create_tts_fn(net_g_ms, speaker_id): - def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol): - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 100 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - if not is_symbol: - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = 
LongTensor([stn_tst.size(0)]).to(device) - sid = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "Success", (22050, audio) - return tts_fn - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_lang): - if temp_lang == 0: - clean_text = f'[ZH]{input_text}[ZH]' - elif temp_lang == 1: - clean_text = f'[JA]{input_text}[JA]' - else: - clean_text = input_text - return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else '' - - return to_symbol_fn -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - elif language == 1: - return 0.6, 0.668, 1 - else: - return 0.6, 0.668, 1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio"); - let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--all", action="store_true", default=False, help="enable all models") - args = parser.parse_args() - device = torch.device(args.device) - categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil"] - others = { - "Princess Connect! Re:Dive": "https://huggingface.co/spaces/sayashi/vits-models-pcr", - "Genshin Impact": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3", - "Honkai Impact 3rd": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3", - "Overwatch 2": "https://huggingface.co/spaces/sayashi/vits-models-ow2", - } - if args.all: - categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil", "Princess Connect! Re:Dive", "Genshin Impact", "Honkai Impact 3rd", "Overwatch 2"] - others = {} - models = [] - with open("pretrained_models/info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for i, info in models_info.items(): - if info['title'].split("-")[0] not in categories or not info['enable']: - continue - sid = info['sid'] - name_en = info['name_en'] - name_zh = info['name_zh'] - title = info['title'] - cover = f"pretrained_models/{i}/{info['cover']}" - example = info['example'] - language = info['language'] - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0, - **hps_ms.model) - utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None) - _ = net_g_ms.eval().to(device) - models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms))) - with gr.Blocks() as app: - gr.Markdown( - "#
vits-models\n" - "##
Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n" - "##
请不要生成会对个人以及组织造成侵害的内容\n\n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)\n\n" - "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/sayashi/vits-models?duplicate=true)\n\n" - "[![Finetune your own model](https://badgen.net/badge/icon/github?icon=github&label=Finetune%20your%20own%20model)](https://github.com/SayaSS/vits-finetuning)" - ) - - with gr.Tabs(): - for category in categories: - with gr.TabItem(category): - with gr.TabItem("EN"): - for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models: - if title.split("-")[0] != category: - continue - with gr.TabItem(name_en): - with gr.Row(): - gr.Markdown( - '
' - f'{title}' - f'' if cover else "" - '
' - ) - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}") - lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"], - type="index", value=language) - with gr.Accordion(label="Advanced Options", open=False): - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[input_text], - samples=[[x] for x in hps_ms.symbols]) - symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False) - btn = gr.Button(value="Generate", variant="primary") - with gr.Row(): - ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}") - download = gr.Button("Download Audio") - btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}")) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - symbol_input.change( - to_symbol_fn, - [symbol_input, input_text, lang], - [input_text] - ) - symbol_list.click(None, [symbol_list, symbol_list_json], [input_text], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return text_input.value; - }}""") - with gr.TabItem("中文"): - for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models: - if title.split("-")[0] != category: - continue - with gr.TabItem(name_zh): - with gr.Row(): - gr.Markdown( - '
' - f'{title}' - f'' if cover else "" - '
' - ) - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}") - lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文"if language == "Chinese" else "日语") - with gr.Accordion(label="高级选项", open=False): - symbol_input = gr.Checkbox(value=False, label="符号输入") - symbol_list = gr.Dataset(label="符号列表", components=[input_text], - samples=[[x] for x in hps_ms.symbols]) - symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False) - btn = gr.Button(value="生成", variant="primary") - with gr.Row(): - ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="输出信息") - o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}") - download = gr.Button("下载音频") - btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2]) - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}")) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - symbol_input.change( - to_symbol_fn, - [symbol_input, input_text, lang], - [input_text] - ) - symbol_list.click(None, [symbol_list, symbol_list_json], [input_text], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return text_input.value; - }}""") - for category, link in others.items(): - with gr.TabItem(category): - gr.Markdown( - f''' -
-

Click to Go

- - -
- ''' - ) - app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share) diff --git a/spaces/rinme/vits-models/utils.py b/spaces/rinme/vits-models/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/rinme/vits-models/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - 
parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/rkingery/dumb-language-model/utils.py b/spaces/rkingery/dumb-language-model/utils.py deleted file mode 100644 index b5f67579b4daae7f4e89ca4656cfd40dcedffd15..0000000000000000000000000000000000000000 --- a/spaces/rkingery/dumb-language-model/utils.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch -import torchtext -from pathlib import Path - -from model import EncoderLM - -MAX_LEN = 50 -TEMPERATURE = 1.0 - -device = 'cpu' -model_dir = Path().cwd() / 'models' -model_path = model_dir / 'lm_8_layers.pth' -vocab_path = model_dir / 'vocab_8_layers.pth' - -emb_size = 768 -dim_feedforward = 2048 -num_layers = 8 -nhead = 12 - 
-def get_vocab(): - return torch.load(vocab_path, map_location=torch.device(device)) - -vocab = get_vocab() -vocab_size = len(vocab) -padding_idx = vocab.get_stoi()[''] - -def get_model(): - model = EncoderLM(vocab_size, emb_size, num_layers=num_layers, nhead=8, dim_feedforward=dim_feedforward, - masking=True, padding_idx=padding_idx, dropout=0.1, max_len=525, deepnorm=True).to(device) - model.load_state_dict(torch.load(model_path, map_location=torch.device(device))) - return model - -def clean_text(tokens): - from nltk.tokenize.treebank import TreebankWordDetokenizer - text = [] - prev_token = '' - for token in tokens: - if token != '': - if prev_token == '': - token = token.upper() - if prev_token == '': - token = token.title() - if token == '@-@': - token = '-' - if token not in ['', '', '', '']: - text.append(token) - prev_token = token - detokenizer = TreebankWordDetokenizer() - text = detokenizer.detokenize(text) - text = text.replace(" ' ", "' ").replace("' ", "'") - text = text.replace(" . . . ", "...").replace(" . ", ". ").replace(" ? ", "? ").replace(" ! ", "! ") - return text - -def generate_text(seed, model, vocab, max_len=20, temperature=1., device=device, skip_tokens=[''], top_k=50): - stoi, itos = vocab.get_stoi(), vocab.get_itos() - stoi_map = lambda word: stoi[word] if word in stoi.keys() else stoi[''] - tokenizer = torchtext.data.utils.get_tokenizer('basic_english') - model = model.eval() - seed_tokens = [''] + tokenizer(seed) - x = torch.tensor([stoi_map(word) for word in seed_tokens]).long().to(device)[None, :] - idxs = [] - for _ in range(max_len): - yhat = model(x) / temperature - probs = yhat[:, -1].softmax(dim=-1).squeeze() - top_probs = torch.topk(probs, top_k, dim=-1).indices - probs[~top_probs] = 0. - idx = torch.multinomial(probs, 1, replacement=True).item() - idxs.append(idx) - x = torch.cat([x, torch.ones(1, 1).fill_(idx).long().to(device)], dim=1) - if itos[idx] == '': - break - generated = [itos[idx] for idx in idxs] - text = seed + ' ' + clean_text(generated) - return text - - -if __name__ == '__main__': - vocab = get_vocab() - model = get_model() - seed = 'Tell me a story about' - generated = generate_text(seed, model, vocab, max_len=20, temperature=1.0, device=device, skip_tokens=[''], top_k=50) - print(generated) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/mask/utils.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/mask/utils.py deleted file mode 100644 index 90544b34f49aa60ac2a1abae10f1a89cc9fe43f0..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/mask/utils.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import pycocotools.mask as mask_util -import torch - - -def split_combined_polys(polys, poly_lens, polys_per_mask): - """Split the combined 1-D polys into masks. - - A mask is represented as a list of polys, and a poly is represented as - a 1-D array. In dataset, all masks are concatenated into a single 1-D - tensor. Here we need to split the tensor into original representations. - - Args: - polys (list): a list (length = image num) of 1-D tensors - poly_lens (list): a list (length = image num) of poly length - polys_per_mask (list): a list (length = image num) of poly number - of each mask - - Returns: - list: a list (length = image num) of list (length = mask num) of \ - list (length = poly num) of numpy array. 
- """ - mask_polys_list = [] - for img_id in range(len(polys)): - polys_single = polys[img_id] - polys_lens_single = poly_lens[img_id].tolist() - polys_per_mask_single = polys_per_mask[img_id].tolist() - - split_polys = mmcv.slice_list(polys_single, polys_lens_single) - mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) - mask_polys_list.append(mask_polys) - return mask_polys_list - - -# TODO: move this function to more proper place -def encode_mask_results(mask_results): - """Encode bitmap mask to RLE code. - - Args: - mask_results (list | tuple[list]): bitmap mask results. - In mask scoring rcnn, mask_results is a tuple of (segm_results, - segm_cls_score). - - Returns: - list | tuple: RLE encoded mask. - """ - if isinstance(mask_results, tuple): # mask scoring - cls_segms, cls_mask_scores = mask_results - else: - cls_segms = mask_results - num_classes = len(cls_segms) - encoded_mask_results = [[] for _ in range(num_classes)] - for i in range(len(cls_segms)): - for cls_segm in cls_segms[i]: - encoded_mask_results[i].append( - mask_util.encode( - np.array( - cls_segm[:, :, np.newaxis], order='F', - dtype='uint8'))[0]) # encoded with RLE - if isinstance(mask_results, tuple): - return encoded_mask_results, cls_mask_scores - else: - return encoded_mask_results - - -def mask2bbox(masks): - """Obtain tight bounding boxes of binary masks. - - Args: - masks (Tensor): Binary mask of shape (n, h, w). - - Returns: - Tensor: Bboxe with shape (n, 4) of \ - positive region in binary mask. - """ - N = masks.shape[0] - bboxes = masks.new_zeros((N, 4), dtype=torch.float32) - x_any = torch.any(masks, dim=1) - y_any = torch.any(masks, dim=2) - for i in range(N): - x = torch.where(x_any[i, :])[0] - y = torch.where(y_any[i, :])[0] - if len(x) > 0 and len(y) > 0: - bboxes[i, :] = bboxes.new_tensor( - [x[0], y[0], x[-1] + 1, y[-1] + 1]) - - return bboxes diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/grid_rcnn.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/grid_rcnn.py deleted file mode 100644 index bba7873bcf3df1ca82f471a86cce5a3f15ccf724..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/grid_rcnn.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class GridRCNN(TwoStageDetector): - """Grid R-CNN. - - This detector is the implementation of: - - Grid R-CNN (https://arxiv.org/abs/1811.12030) - - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(GridRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/reppoints_detector.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/reppoints_detector.py deleted file mode 100644 index f1986cdccf3da96cd179f6bfe9f4f16ff54c411e..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/reppoints_detector.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class RepPointsDetector(SingleStageDetector): - """RepPoints: Point Set Representation for Object Detection. - - This detector is the implementation of: - - RepPoints detector (https://arxiv.org/pdf/1904.11490) - """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(RepPointsDetector, - self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, - pretrained, init_cfg) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py deleted file mode 100644 index 5bbe7eea49cae55ef3c4bdbb17e41f5788e45c79..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.runner import auto_fp16, force_fp32 - -from mmdet.core import mask_target -from mmdet.models.builder import HEADS -from mmdet.models.dense_heads.atss_head import reduce_mean -from mmdet.models.utils import build_transformer -from .fcn_mask_head import FCNMaskHead - - -@HEADS.register_module() -class DynamicMaskHead(FCNMaskHead): - r"""Dynamic Mask Head for - `Instances as Queries `_ - - Args: - num_convs (int): Number of convolution layer. - Defaults to 4. - roi_feat_size (int): The output size of RoI extractor, - Defaults to 14. - in_channels (int): Input feature channels. - Defaults to 256. - conv_kernel_size (int): Kernel size of convolution layers. - Defaults to 3. - conv_out_channels (int): Output channels of convolution layers. - Defaults to 256. - num_classes (int): Number of classes. - Defaults to 80 - class_agnostic (int): Whether generate class agnostic prediction. - Defaults to False. - dropout (float): Probability of drop the channel. - Defaults to 0.0 - upsample_cfg (dict): The config for upsample layer. - conv_cfg (dict): The convolution layer config. - norm_cfg (dict): The norm layer config. - dynamic_conv_cfg (dict): The dynamic convolution layer config. - loss_mask (dict): The config for mask loss. 
- """ - - def __init__(self, - num_convs=4, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - conv_out_channels=256, - num_classes=80, - class_agnostic=False, - upsample_cfg=dict(type='deconv', scale_factor=2), - conv_cfg=None, - norm_cfg=None, - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=14, - with_proj=False, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_mask=dict(type='DiceLoss', loss_weight=8.0), - **kwargs): - super(DynamicMaskHead, self).__init__( - num_convs=num_convs, - roi_feat_size=roi_feat_size, - in_channels=in_channels, - conv_kernel_size=conv_kernel_size, - conv_out_channels=conv_out_channels, - num_classes=num_classes, - class_agnostic=class_agnostic, - upsample_cfg=upsample_cfg, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - loss_mask=loss_mask, - **kwargs) - assert class_agnostic is False, \ - 'DynamicMaskHead only support class_agnostic=False' - self.fp16_enabled = False - - self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) - - def init_weights(self): - """Use xavier initialization for all weight parameter and set - classification head bias as a specific value when use focal loss.""" - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - nn.init.constant_(self.conv_logits.bias, 0.) - - @auto_fp16() - def forward(self, roi_feat, proposal_feat): - """Forward function of DynamicMaskHead. - - Args: - roi_feat (Tensor): Roi-pooling features with shape - (batch_size*num_proposals, feature_dimensions, - pooling_h , pooling_w). - proposal_feat (Tensor): Intermediate feature get from - diihead in last stage, has shape - (batch_size*num_proposals, feature_dimensions) - - Returns: - mask_pred (Tensor): Predicted foreground masks with shape - (batch_size*num_proposals, num_classes, - pooling_h*2, pooling_w*2). 
- """ - - proposal_feat = proposal_feat.reshape(-1, self.in_channels) - proposal_feat_iic = self.instance_interactive_conv( - proposal_feat, roi_feat) - - x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size()) - - for conv in self.convs: - x = conv(x) - if self.upsample is not None: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - return mask_pred - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, mask_targets, labels): - num_pos = labels.new_ones(labels.size()).float().sum() - avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item() - loss = dict() - if mask_pred.size(0) == 0: - loss_mask = mask_pred.sum() - else: - loss_mask = self.loss_mask( - mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(), - mask_targets, - avg_factor=avg_factor) - loss['loss_mask'] = loss_mask - return loss - - def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): - - pos_proposals = [res.pos_bboxes for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, - gt_masks, rcnn_train_cfg) - return mask_targets diff --git a/spaces/rorallitri/biomedical-language-models/logs/Beautiful Creatures Caster Chronicles 1 EPUB The First Book in the Bestselling Series that Inspired the Movie.md b/spaces/rorallitri/biomedical-language-models/logs/Beautiful Creatures Caster Chronicles 1 EPUB The First Book in the Bestselling Series that Inspired the Movie.md deleted file mode 100644 index 0dbbf6f97a2e202314be8083425344b2516365f8..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Beautiful Creatures Caster Chronicles 1 EPUB The First Book in the Bestselling Series that Inspired the Movie.md +++ /dev/null @@ -1,6 +0,0 @@ -

beautiful creatures caster chronicles 1 epub


Download: https://tinurll.com/2uzlo3



- - aaccfb2cb3
-
-
-

diff --git a/spaces/rorallitri/biomedical-language-models/logs/Descargar windows xp cd key changer.rar.html (0.00 mb) - Change your Windows XP product key easily and safely.md b/spaces/rorallitri/biomedical-language-models/logs/Descargar windows xp cd key changer.rar.html (0.00 mb) - Change your Windows XP product key easily and safely.md deleted file mode 100644 index ba3493f2415cc4390bcaf678347d94e2a4163f53..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Descargar windows xp cd key changer.rar.html (0.00 mb) - Change your Windows XP product key easily and safely.md +++ /dev/null @@ -1,6 +0,0 @@ -

Descargar windows xp cd key changer.rar.html (0.00 mb)


Download: https://tinurll.com/2uzohh



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/rorallitri/biomedical-language-models/logs/Lien Se Lankstaanskoene The Full Book To Download.md b/spaces/rorallitri/biomedical-language-models/logs/Lien Se Lankstaanskoene The Full Book To Download.md deleted file mode 100644 index e4355cb2a15fdfb6daf416da5e9af6d617b1eef8..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Lien Se Lankstaanskoene The Full Book To Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

lien se lankstaanskoene the full book to download


Download File ····· https://tinurll.com/2uzlXr



-
-Lien Se Lankstaanskoene Afrikaans Eerste Addisionele Taal Graad 12 ... skills, Life skills & personal awareness, general studies, Children's & Educational, Books. 4d29de3e1b
-
-
-

diff --git a/spaces/ryansilk/quantycs/quantycs/2_Stock_Data.py b/spaces/ryansilk/quantycs/quantycs/2_Stock_Data.py deleted file mode 100644 index de8c1af42bbf99a75bc9e21c411d0e1b49fea456..0000000000000000000000000000000000000000 --- a/spaces/ryansilk/quantycs/quantycs/2_Stock_Data.py +++ /dev/null @@ -1,335 +0,0 @@ -import streamlit as st -from streamlit_option_menu import option_menu -import pyEX as p -import plotly.graph_objects as go -from plotly import graph_objects as go -import plost -import numpy as np -import plotly.express as px - -# Initial setup of the page -st.set_page_config(layout='wide', initial_sidebar_state='expanded', page_title='Stock Data') - -# Setting the page title -st.title('Stock Data') - -# Initialize the variable to an empty string -my_input = "" - -# Display the option menu which goes across the top of the page to split the pages into sub-pages -selected = option_menu( - menu_title=None, - options=["Technical", "Quantitative", "Custom"], - icons=["pencil-fill", "bar-chart-fill"], - orientation="horizontal", -) - -# Giving the sidebar a header -st.sidebar.header('Quantycs Test `version 1`') - -# If the sidebar loads, state the below text in green -st.sidebar.success("Select Stock Data Below") - -# Give the user a dropdown menu to select a stock -stock_option = st.sidebar.selectbox('Select one symbol', ('AAPL', 'MSFT', "SPY", 'WMT')) - -# End of sidebar note to say created by Ryan -st.sidebar.markdown(''' ---- -Created by Ryan Silk -''') - -# Display the appropriate content based on the user's selection of page - -if selected == "Technical": - # Call the API from IEX Cloud based on user input from 'stock_option' variable - token = 'sk_705b5ca7744f49009b2004c682c3a010' - c = p.Client(api_token=token, version='stable') - - # Assign the API call to a pandas dataframe called ticker - ticker = c.chartDF(symbol=stock_option, timeframe='1y')[ - ['open', 'high', 'low', 'close', 'volume']] - - ticker = ticker.sort_values(by="date", ascending=True) - - delta = ticker['close'].diff(5) - ticker['Close_to_Close_5'] = delta - - # 5 RSI + Percentile Calculations - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=4, adjust=False).mean() - ema_down = down.ewm(com=4, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (5)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 14 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=13, adjust=False).mean() - ema_down = down.ewm(com=13, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (14)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 50 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=49, adjust=False).mean() - ema_down = down.ewm(com=49, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (50)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 100 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=99, adjust=False).mean() - ema_down = down.ewm(com=99, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (100)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 200 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=199, adjust=False).mean() - ema_down = 
down.ewm(com=199, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (200)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # Moving Averages - ticker['Moving Average (5)'] = ticker['close'].rolling(5).mean().round(2) - ticker['Moving Average (14)'] = ticker['close'].rolling(14).mean().round(2) - ticker['Moving Average (50)'] = ticker['close'].rolling(50).mean().round(2) - ticker['Moving Average (100)'] = ticker['close'].rolling(100).mean().round(2) - ticker['Moving Average (200)'] = ticker['close'].rolling(200).mean().round(2) - - ticker['Yesterday Moving Average (5)'] = ticker['Moving Average (5)'].take([-2]) - - ticker['MA Difference (5)'] = (ticker['Moving Average (5)'] / ticker['Yesterday Moving Average (5)']) - - # Moving Average Trend Analysis - - # Values for Moving Averages (today) - - today_ma_5 = ticker['Moving Average (5)'].tail(1) - today_ma_14 = ticker['Moving Average (14)'].tail(1) - today_ma_50 = ticker['Moving Average (50)'].tail(1) - today_ma_100 = ticker['Moving Average (100)'].tail(1) - today_ma_200 = ticker['Moving Average (200)'].tail(1) - - - # Values for Moving Averages (yesterday) - - yesterday_ma_5 = ticker['Moving Average (5)'].take([-2]) - yesterday_ma_14 = ticker['Moving Average (14)'].take([-2]) - yesterday_ma_50 = ticker['Moving Average (50)'].take([-2]) - yesterday_ma_100 = ticker['Moving Average (100)'].take([-2]) - yesterday_ma_200 = ticker['Moving Average (200)'].take([-2]) - - result_5 = ticker['MA Difference (5)'].tail(1) - - st.markdown('### Moving Average Trend Analysis') - col1, col2, col3, col4, col5 = st.columns(5) - col1.metric("5 Day", today_ma_5, "3.4%") - col2.metric("14 Day", today_ma_14, "4.6%") - col3.metric("50 Day", today_ma_50, "4.3%") - col4.metric("100 Day", today_ma_100, "4.3%") - col5.metric("200 Day", today_ma_200, "4.3%") - - # Row B - - # Values for RSIs (today) - - today_rsi_5 = ticker['RSI (5)'].tail(1) - today_rsi_14 = ticker['RSI (14)'].tail(1) - today_rsi_50 = ticker['RSI (50)'].tail(1) - today_rsi_100 = ticker['RSI (100)'].tail(1) - today_rsi_200 = ticker['RSI (200)'].tail(1) - - # Values for RSIs (yesterday) - - yesterday_rsi_5 = ticker['RSI (5)'].take([-2]) - yesterday_rsi_14 = ticker['RSI (14)'].take([-2]) - yesterday_rsi_50 = ticker['RSI (50)'].take([-2]) - yesterday_rsi_100 = ticker['RSI (100)'].take([-2]) - yesterday_rsi_200 = ticker['RSI (200)'].take([-2]) - - st.markdown('### RSI Analysis') - col10, col20, col30, col40, col50, = st.columns(5) - col10.metric("5 Day", today_rsi_5) - col20.metric("14 Day", today_rsi_14) - col30.metric("50 Day", today_rsi_50) - col40.metric("100 Day", today_rsi_100) - col50.metric("200 day", today_rsi_200) - - - # Writing out the header for the first candletick chart - - st.markdown('### Stock Price Chart') - # Assigning the parameters to the chart - fig = go.Figure(data=go.Candlestick( - open=ticker['open'], - high=ticker['high'], - low=ticker['low'], - close=ticker['close'])) - st.write(fig) - - c1, c2 = st.columns((5, 5)) - with c1: - st.markdown('### Heatmap') - fig_1 = px.line(ticker, y=ticker['RSI (14)'], title='5 Period RSI') - st.write(fig_1) - with c2: - st.markdown('### Heatmap') - fig_1 = px.line(ticker, y=ticker['RSI (14)'], title='5 Period RSI') - st.write(fig_1) - - -if selected == "Quantitative": - # Call the API from IEX Cloud based on user input from 'stock_option' variable - token = 'sk_705b5ca7744f49009b2004c682c3a010' - c = p.Client(api_token=token, version='stable') - - # Assign the API call to a 
pandas dataframe called ticker - ticker = c.chartDF(symbol=stock_option, timeframe='2y')[ - ['open', 'high', 'low', 'close', 'volume']] - - - # Create various other dataframes based on user input - bb = ticker['close'].rolling(5).quantile(0.1).round(2) - macd = ticker['close'].rolling(5).quantile(0.2).round(2) - rsi = ticker['close'].rolling(5).quantile(0.3).round(2) - ticker['40 Percentile (5)'] = ticker['close'].rolling(5).quantile(0.4).round(2) - ticker['50 Percentile (5)'] = ticker['close'].rolling(5).quantile(0.5).round(2) - - - delta = ticker['close'].diff(5) - ticker['Close_to_Close_5'] = delta - - # 5 RSI + Percentile Calculations - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=4, adjust=False).mean() - ema_down = down.ewm(com=4, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (5)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 10 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=9, adjust=False).mean() - ema_down = down.ewm(com=9, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (10)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 14 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=13, adjust=False).mean() - ema_down = down.ewm(com=13, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (14)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 20 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=19, adjust=False).mean() - ema_down = down.ewm(com=19, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (20)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 50 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=49, adjust=False).mean() - ema_down = down.ewm(com=49, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (50)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 100 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=99, adjust=False).mean() - ema_down = down.ewm(com=99, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (100)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # 200 RSI Calc - up = delta.clip(lower=0) - down = -1 * delta.clip(upper=0) - ema_up = up.ewm(com=199, adjust=False).mean() - ema_down = down.ewm(com=199, adjust=False).mean() - rs = ema_up / ema_down - ticker['RSI (200)'] = 100 - (100 / (1 + rs)) - # Skip first 14 days to have real values - ticker = ticker.iloc[14:].round(2) - - # Row A - st.markdown('### Trend Analysis') - col1, col2, col3, col4, col5 = st.columns(5) - col1.metric("Long Term Trend", "140,02", "-1.3%") - col2.metric("Medium Term Trend", "135.09", "4.6%") - col3.metric("Short Term Trend", "138.83", "4.3%") - col4.metric("Short Term Trend", "138.83", "4.3%") - col5.metric("Short Term Trend", "138.83", "4.3%") - - # Row B - - # Values for RSIs (today) - - today_rsi_5 = ticker['RSI (5)'].tail(1) - today_rsi_14 = ticker['RSI (14)'].tail(1) - today_rsi_50 = ticker['RSI (50)'].tail(1) - today_rsi_100 = ticker['RSI (100)'].tail(1) - today_rsi_200 = ticker['RSI (200)'].tail(1) - - # 
Values for RSIs (yesterday) - - yesterday_rsi_5 = ticker['RSI (5)'].take([-2]) - yesterday_rsi_14 = ticker['RSI (14)'].tail(1) - yesterday_rsi_50 = ticker['RSI (50)'].tail(1) - yesterday_rsi_100 = ticker['RSI (100)'].tail(1) - yesterday_rsi_200 = ticker['RSI (200)'].tail(1) - - st.markdown('### RSI Analysis') - col10, col20, col30, col40, col50, = st.columns(5) - col10.metric("5 Day", today_rsi_5, "-1.3%") - col20.metric("14 Day", today_rsi_14, "4.6%") - col30.metric("50 Day", today_rsi_50, "4.3%") - col40.metric("100 Day", today_rsi_100, "4.3%") - col50.metric("200 day", today_rsi_200, "4.3%") - - - # Writing out the header for the first candletick chart - st.markdown('### Stock Price Chart') - # Assigning the parameters to the chart - fig = go.Figure(data=go.Candlestick( - open=ticker['open'], - high=ticker['high'], - low=ticker['low'], - close=ticker['close'])) - st.write(fig) - - fig_1 = px.line(ticker, y=ticker['RSI (14)'], title='5 Period RSI') - st.write(fig_1) diff --git a/spaces/samcaicn/bingai/src/lib/isomorphic/browser.ts b/spaces/samcaicn/bingai/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/samcaicn/bingai/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/sayakpaul/fivek-retouching-maxim/maxim/blocks/block_gating.py b/spaces/sayakpaul/fivek-retouching-maxim/maxim/blocks/block_gating.py deleted file mode 100644 index 0d06af50448f7a15a39c84100be1a99710b24c32..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/fivek-retouching-maxim/maxim/blocks/block_gating.py +++ /dev/null @@ -1,67 +0,0 @@ -import tensorflow as tf -from tensorflow.keras import backend as K -from tensorflow.keras import layers - -from ..layers import BlockImages, SwapAxes, UnblockImages - - -def BlockGatingUnit(use_bias: bool = True, name: str = "block_gating_unit"): - """A SpatialGatingUnit as defined in the gMLP paper. - - The 'spatial' dim is defined as the **second last**. - If applied on other dims, you should swapaxes first. - """ - - def apply(x): - u, v = tf.split(x, 2, axis=-1) - v = layers.LayerNormalization( - epsilon=1e-06, name=f"{name}_intermediate_layernorm" - )(v) - n = K.int_shape(x)[-2] # get spatial dim - v = SwapAxes()(v, -1, -2) - v = layers.Dense(n, use_bias=use_bias, name=f"{name}_Dense_0")(v) - v = SwapAxes()(v, -1, -2) - return u * (v + 1.0) - - return apply - - -def BlockGmlpLayer( - block_size, - use_bias: bool = True, - factor: int = 2, - dropout_rate: float = 0.0, - name: str = "block_gmlp", -): - """Block gMLP layer that performs local mixing of tokens.""" - - def apply(x): - n, h, w, num_channels = ( - K.int_shape(x)[0], - K.int_shape(x)[1], - K.int_shape(x)[2], - K.int_shape(x)[3], - ) - fh, fw = block_size - gh, gw = h // fh, w // fw - x = BlockImages()(x, patch_size=(fh, fw)) - # MLP2: Local (block) mixing part, provides within-block communication. 
- y = layers.LayerNormalization(epsilon=1e-06, name=f"{name}_LayerNorm")(x) - y = layers.Dense( - num_channels * factor, - use_bias=use_bias, - name=f"{name}_in_project", - )(y) - y = tf.nn.gelu(y, approximate=True) - y = BlockGatingUnit(use_bias=use_bias, name=f"{name}_BlockGatingUnit")(y) - y = layers.Dense( - num_channels, - use_bias=use_bias, - name=f"{name}_out_project", - )(y) - y = layers.Dropout(dropout_rate)(y) - x = x + y - x = UnblockImages()(x, grid_size=(gh, gw), patch_size=(fh, fw)) - return x - - return apply diff --git a/spaces/sayakpaul/fivek-retouching-maxim/maxim/blocks/misc_gating.py b/spaces/sayakpaul/fivek-retouching-maxim/maxim/blocks/misc_gating.py deleted file mode 100644 index 6787d7fdda921b4ef00d1450e34281c7321833f8..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/fivek-retouching-maxim/maxim/blocks/misc_gating.py +++ /dev/null @@ -1,213 +0,0 @@ -import functools - -import tensorflow as tf -from tensorflow.keras import backend as K -from tensorflow.keras import layers - -from ..layers import BlockImages, SwapAxes, UnblockImages -from .block_gating import BlockGmlpLayer -from .grid_gating import GridGmlpLayer - -Conv1x1 = functools.partial(layers.Conv2D, kernel_size=(1, 1), padding="same") -Conv3x3 = functools.partial(layers.Conv2D, kernel_size=(3, 3), padding="same") -ConvT_up = functools.partial( - layers.Conv2DTranspose, kernel_size=(2, 2), strides=(2, 2), padding="same" -) -Conv_down = functools.partial( - layers.Conv2D, kernel_size=(4, 4), strides=(2, 2), padding="same" -) - - -def ResidualSplitHeadMultiAxisGmlpLayer( - block_size, - grid_size, - block_gmlp_factor: int = 2, - grid_gmlp_factor: int = 2, - input_proj_factor: int = 2, - use_bias: bool = True, - dropout_rate: float = 0.0, - name: str = "residual_split_head_maxim", -): - """The multi-axis gated MLP block.""" - - def apply(x): - shortcut = x - n, h, w, num_channels = ( - K.int_shape(x)[0], - K.int_shape(x)[1], - K.int_shape(x)[2], - K.int_shape(x)[3], - ) - x = layers.LayerNormalization(epsilon=1e-06, name=f"{name}_LayerNorm_in")(x) - - x = layers.Dense( - int(num_channels) * input_proj_factor, - use_bias=use_bias, - name=f"{name}_in_project", - )(x) - x = tf.nn.gelu(x, approximate=True) - - u, v = tf.split(x, 2, axis=-1) - - # GridGMLPLayer - u = GridGmlpLayer( - grid_size=grid_size, - factor=grid_gmlp_factor, - use_bias=use_bias, - dropout_rate=dropout_rate, - name=f"{name}_GridGmlpLayer", - )(u) - - # BlockGMLPLayer - v = BlockGmlpLayer( - block_size=block_size, - factor=block_gmlp_factor, - use_bias=use_bias, - dropout_rate=dropout_rate, - name=f"{name}_BlockGmlpLayer", - )(v) - - x = tf.concat([u, v], axis=-1) - - x = layers.Dense( - num_channels, - use_bias=use_bias, - name=f"{name}_out_project", - )(x) - x = layers.Dropout(dropout_rate)(x) - x = x + shortcut - return x - - return apply - - -def GetSpatialGatingWeights( - features: int, - block_size, - grid_size, - input_proj_factor: int = 2, - dropout_rate: float = 0.0, - use_bias: bool = True, - name: str = "spatial_gating", -): - - """Get gating weights for cross-gating MLP block.""" - - def apply(x): - n, h, w, num_channels = ( - K.int_shape(x)[0], - K.int_shape(x)[1], - K.int_shape(x)[2], - K.int_shape(x)[3], - ) - - # input projection - x = layers.LayerNormalization(epsilon=1e-06, name=f"{name}_LayerNorm_in")(x) - x = layers.Dense( - num_channels * input_proj_factor, - use_bias=use_bias, - name=f"{name}_in_project", - )(x) - x = tf.nn.gelu(x, approximate=True) - u, v = tf.split(x, 2, axis=-1) - - # Get grid MLP weights - 
gh, gw = grid_size - fh, fw = h // gh, w // gw - u = BlockImages()(u, patch_size=(fh, fw)) - dim_u = K.int_shape(u)[-3] - u = SwapAxes()(u, -1, -3) - u = layers.Dense(dim_u, use_bias=use_bias, name=f"{name}_Dense_0")(u) - u = SwapAxes()(u, -1, -3) - u = UnblockImages()(u, grid_size=(gh, gw), patch_size=(fh, fw)) - - # Get Block MLP weights - fh, fw = block_size - gh, gw = h // fh, w // fw - v = BlockImages()(v, patch_size=(fh, fw)) - dim_v = K.int_shape(v)[-2] - v = SwapAxes()(v, -1, -2) - v = layers.Dense(dim_v, use_bias=use_bias, name=f"{name}_Dense_1")(v) - v = SwapAxes()(v, -1, -2) - v = UnblockImages()(v, grid_size=(gh, gw), patch_size=(fh, fw)) - - x = tf.concat([u, v], axis=-1) - x = layers.Dense(num_channels, use_bias=use_bias, name=f"{name}_out_project")(x) - x = layers.Dropout(dropout_rate)(x) - return x - - return apply - - -def CrossGatingBlock( - features: int, - block_size, - grid_size, - dropout_rate: float = 0.0, - input_proj_factor: int = 2, - upsample_y: bool = True, - use_bias: bool = True, - name: str = "cross_gating", -): - - """Cross-gating MLP block.""" - - def apply(x, y): - # Upscale Y signal, y is the gating signal. - if upsample_y: - y = ConvT_up( - filters=features, use_bias=use_bias, name=f"{name}_ConvTranspose_0" - )(y) - - x = Conv1x1(filters=features, use_bias=use_bias, name=f"{name}_Conv_0")(x) - n, h, w, num_channels = ( - K.int_shape(x)[0], - K.int_shape(x)[1], - K.int_shape(x)[2], - K.int_shape(x)[3], - ) - - y = Conv1x1(filters=num_channels, use_bias=use_bias, name=f"{name}_Conv_1")(y) - - shortcut_x = x - shortcut_y = y - - # Get gating weights from X - x = layers.LayerNormalization(epsilon=1e-06, name=f"{name}_LayerNorm_x")(x) - x = layers.Dense(num_channels, use_bias=use_bias, name=f"{name}_in_project_x")(x) - x = tf.nn.gelu(x, approximate=True) - gx = GetSpatialGatingWeights( - features=num_channels, - block_size=block_size, - grid_size=grid_size, - dropout_rate=dropout_rate, - use_bias=use_bias, - name=f"{name}_SplitHeadMultiAxisGating_x", - )(x) - - # Get gating weights from Y - y = layers.LayerNormalization(epsilon=1e-06, name=f"{name}_LayerNorm_y")(y) - y = layers.Dense(num_channels, use_bias=use_bias, name=f"{name}_in_project_y")(y) - y = tf.nn.gelu(y, approximate=True) - gy = GetSpatialGatingWeights( - features=num_channels, - block_size=block_size, - grid_size=grid_size, - dropout_rate=dropout_rate, - use_bias=use_bias, - name=f"{name}_SplitHeadMultiAxisGating_y", - )(y) - - # Apply cross gating: X = X * GY, Y = Y * GX - y = y * gx - y = layers.Dense(num_channels, use_bias=use_bias, name=f"{name}_out_project_y")(y) - y = layers.Dropout(dropout_rate)(y) - y = y + shortcut_y - - x = x * gy # gating x using y - x = layers.Dense(num_channels, use_bias=use_bias, name=f"{name}_out_project_x")(x) - x = layers.Dropout(dropout_rate)(x) - x = x + y + shortcut_x # get all aggregated signals - return x, y - - return apply diff --git a/spaces/scedlatioru/img-to-music/example/Flashtool 0 9 10 1 Windows Exe WORK.md b/spaces/scedlatioru/img-to-music/example/Flashtool 0 9 10 1 Windows Exe WORK.md deleted file mode 100644 index b13ce3bd4c4b8c422ba1142425255e63ac8cebc2..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Flashtool 0 9 10 1 Windows Exe WORK.md +++ /dev/null @@ -1,47 +0,0 @@ - -

Flashtool 0 9 10 1 Windows Exe - The Best Tool for Flashing Sony Devices

- -

If you own a Sony device and want to flash custom ROMs, kernels, recoveries or root your device, you need a reliable tool that can do the job safely and easily. That's where Flashtool 0 9 10 1 Windows Exe comes in handy.

-

Flashtool 0 9 10 1 Windows Exe


DOWNLOAD 🗸 https://gohhs.com/2uEAaz



- -

Flashtool 0 9 10 1 Windows Exe is a free software that allows you to flash firmware files (FTF) to your Sony device. It supports a wide range of devices, from Xperia Z to Xperia E, and it works on Windows XP/Vista/7/8/10/11 32 and 64-bit.

- -

How to Download and Install Flashtool 0 9 10 1 Windows Exe

- -

Downloading and installing Flashtool 0 9 10 1 Windows Exe is very simple and fast. You just need to follow these steps:

- -
    -
  1. Go to one of the download links provided by Google Drive or XDA Forums. You can choose from different versions of Flashtool, but we recommend Flashtool 0 9 10 1 Windows Exe as it is the latest and most stable one.
  2. -
  3. Save the file to your computer and run it as administrator. The installation wizard will guide you through the process.
  4. -
  5. Once the installation is finished, you will see a shortcut on your desktop or in your start menu. Launch Flashtool from there.
  6. -
  7. You will need to install drivers for your device if you haven't done so before. You can do that from Flashtool by clicking on the drivers icon and selecting your device model.
  8. -
- -

How to Use Flashtool 0 9 10 1 Windows Exe to Flash Your Device

- -

Using Flashtool 0 9 10 1 Windows Exe to flash your device is very easy and safe. You just need to follow these steps:

- -
    -
  • Make sure your device is fully charged and backed up before flashing anything.
  • -
  • Download the firmware file (FTF) that you want to flash to your device. You can find them on various websites or forums, or you can create your own FTF from official firmware using Flashtool.
  • -
  • Connect your device to your computer using a USB cable. Make sure you enable USB debugging mode on your device.
  • -
  • Open Flashtool and click on the lightning icon. Select Flashmode and browse for the FTF file that you downloaded or created.
  • -
  • Select the options that you want to flash, such as wipe data, cache, apps log, etc. Be careful not to select anything that you don't want to lose.
  • -
  • Click on OK and follow the instructions on the screen. You will need to turn off your device and press a specific button (usually volume down) while connecting it to your computer.
  • -
  • Wait for Flashtool to flash the FTF file to your device. Do not disconnect or interrupt the process until it is done.
  • -
  • Your device will reboot automatically when the flashing is complete. Enjoy your new firmware!
  • -
- -

Conclusion

- -

Flashtool 0 9 10 1 Windows Exe is a great tool for flashing Sony devices. It is easy to use, safe and compatible with many models. It allows you to flash custom ROMs, kernels, recoveries or root your device with ease.

- -

If you want to customize your Sony device and unleash its full potential, download Flashtool 0 9 10 1 Windows Exe today and start flashing!

-

-


3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Schlongs Of Skyrim Light Se.md b/spaces/scedlatioru/img-to-music/example/Schlongs Of Skyrim Light Se.md deleted file mode 100644 index 70dae56211c157b8815b9b88f443c2a6c5f802d5..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Schlongs Of Skyrim Light Se.md +++ /dev/null @@ -1,6 +0,0 @@ -

Schlongs Of Skyrim Light Se


Download ––– https://gohhs.com/2uEyWp



-
- 3cee63e6c2
-
-
-

diff --git a/spaces/scedlatioru/img-to-music/example/Ys Memories Of Celceta Update V20200219.md b/spaces/scedlatioru/img-to-music/example/Ys Memories Of Celceta Update V20200219.md deleted file mode 100644 index 19e5bf8ff07fe8c9fdbada632b50f49ac1b1faed..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Ys Memories Of Celceta Update V20200219.md +++ /dev/null @@ -1,28 +0,0 @@ -
-

Ys: Memories of Celceta Update v20200219 - What You Need to Know

-

Ys: Memories of Celceta is a remastered version of the classic action RPG that was originally released for PS Vita in 2012. The game follows the adventures of Adol Christin, a young swordsman who wakes up in the mysterious land of Celceta with no memory of his past. Along with his new friends, he sets out to explore the vast wilderness and recover his lost memories.

-

Ys: Memories of Celceta Update v20200219


Download File ::: https://gohhs.com/2uEyOh



-

The game was ported to PC in 2018 by XSEED Games, with enhanced graphics, improved controls, and support for 4K resolution. The PC version also includes all the DLC content that was released for the PS Vita version, such as extra costumes, items, and modes.

-

On February 19, 2020, XSEED Games released a new update for the PC version of Ys: Memories of Celceta. The update v20200219 brings some bug fixes and improvements to the game's performance and stability. Here are some of the main changes that the update introduces:

-
    -
  • Fixed an issue where some enemies would not drop items or gold.
  • -
  • Fixed an issue where some achievements would not unlock properly.
  • -
  • Fixed an issue where some sound effects would not play correctly.
  • -
  • Fixed an issue where some text would not display correctly in some languages.
  • -
  • Improved the game's compatibility with various controllers and keyboards.
  • -
  • Improved the game's loading times and frame rate.
  • -
  • Improved the game's stability and reduced crashes.
  • -
-

To install the update, you need to have the base game installed on your PC. You can download the update from Steam or from the official website of XSEED Games. The update is about 200 MB in size and will overwrite some of the game's files. Make sure to back up your save data before installing the update.

-

Why You Should Play Ys: Memories of Celceta

-

If you are a fan of action RPGs, you should definitely give Ys: Memories of Celceta a try. The game offers a fast-paced and fluid combat system, where you can switch between three characters with different skills and abilities. You can also customize your characters' equipment, skills, and attributes to suit your playstyle.

-

The game also features a rich and immersive world, with diverse environments, colorful graphics, and dynamic weather effects. You can explore the vast map of Celceta, which is filled with secrets, treasures, quests, and enemies. You can also interact with various NPCs, who will offer you information, services, or side stories.

-

-

The game also has a captivating story, with memorable characters, witty dialogue, and emotional moments. You will follow Adol's journey as he gradually regains his memories and learns more about himself and his past. You will also encounter various friends and foes along the way, who will help or hinder your progress.

-

The game also has a great soundtrack, composed by Falcom Sound Team JDK. The music perfectly matches the mood and atmosphere of each scene, from epic battles to serene exploration. The game also features voice acting for most of the dialogue, which adds more personality and expression to the characters.

-

Conclusion

-

Ys: Memories of Celceta is one of the best action RPGs that you can play on PC. The game offers a thrilling and satisfying gameplay experience, with smooth combat, deep customization, and expansive exploration. The game also has a compelling story, with likable characters, humorous dialogue, and touching moments. The game also has a stunning presentation, with beautiful graphics, atmospheric music, and quality voice acting.

-

If you have not played Ys: Memories of Celceta yet, you should definitely check it out. And if you have already played it, you should download the latest update v20200219 to enjoy the game even more. The update fixes some bugs and improves the game's performance and stability.

-

Ys: Memories of Celceta is available on Steam for $24.99 USD. You can also buy the Digital Deluxe Edition for $29.99 USD, which includes a digital art book, a digital soundtrack sampler, and some wallpapers. You can also get the game on GOG.com or Humble Bundle for the same price.

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/sdhsdhk/bingosjj/src/components/ui/select.tsx b/spaces/sdhsdhk/bingosjj/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/sh20raj/telebot/README.md b/spaces/sh20raj/telebot/README.md deleted file mode 100644 index 924a5f6bd1aec7a42a7acafb9667b7d3297b18f6..0000000000000000000000000000000000000000 --- a/spaces/sh20raj/telebot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Telebot -emoji: 👁 -colorFrom: yellow -colorTo: pink -sdk: streamlit -sdk_version: 1.28.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/shashankanand13/game-automation-webapp/app.py b/spaces/shashankanand13/game-automation-webapp/app.py deleted file mode 100644 index 4fff53dcebb87f678eddead629ce7174a483efc6..0000000000000000000000000000000000000000 --- a/spaces/shashankanand13/game-automation-webapp/app.py +++ /dev/null @@ -1,603 +0,0 @@ -from bokeh.themes import theme -from numpy.core.records import record -import pandas as pd -import numpy as np -import cv2 -import mediapipe as mp -import time -from PIL import Image -import tempfile -from bokeh.models.widgets import Div -import streamlit as st -# --------------------------------------------------------------------- -st.set_page_config( - page_title="Game Keys", - page_icon="🎮", - layout="wide", - initial_sidebar_state="expanded" -) - -# --------------------------------------------------------------------- - -DEMO_IMAGE = 'demo.jpg' -DEMO_VIDEO = 'test3.mp4' -prevTime=0 -currTime=0 -tipIds= [4,8,12,16,20] -st.title('Game Keys with Hand Tracking Web-App') -url = 
'https://github.com/shashankanand13monu/Game-Automation' - -# ---------------------------------------------------------------------- - -mp_drawing = mp.solutions.drawing_utils -mp_draw= mp.solutions.drawing_utils -mp_face_mesh = mp.solutions.face_mesh -mp_draw= mp.solutions.drawing_utils -mp_hand= mp.solutions.hands -mp_hands = mp.solutions.hands -mp_drawing_styles = mp.solutions.drawing_styles - -# ---------------------------------------------------------------------- - -t = st.empty() -def draw(str): - t.markdown(f'

{str}

', unsafe_allow_html=True) - -# ---------------------------------------------------------------------- - -st.markdown( - """ - - """,unsafe_allow_html=True,) -# ---------------------------------------------------------------------- -if st.sidebar.button('Github'): - js = "window.open('https://github.com/shashankanand13monu/Game-Automation')" # New tab or window - # js = "window.location.href = 'https://www.streamlit.io/'" # Current tab - html = ''.format(js) - div = Div(text=html) - st.bokeh_chart(div) - -# ---------------------------------------------------------------------- - -st.sidebar.title('Menu') -st.sidebar.subheader('Settings') - -# ---------------------------------------------------------------------- -@st.cache () -# ---------------------------------------------------------------------- - -def image_resize(image, width=None, height=None, inter =cv2.INTER_AREA): - - dim = None - (h ,w) = image.shape[:2] - if width is None and height is None: - return image - if width is None: - r= width/float(w) - dim = (int(w*r), height) - else: - r = width/float(w) - dim = (width, int(h*r)) - #resize the image - resized =cv2.resize(image, dim ,interpolation=inter) - return resized -# ---------------------------------------------------------------------- - -app_mode= st.sidebar.selectbox('Choose the App Mode', - ['About App','Run on Image','Run On Video','Show Code']) - -# ---------------------------------------------------------------------- - -if app_mode== 'About App': - st.markdown('App Made using **Mediapipe** & **Open CV**') - - st.markdown( - """ - - """,unsafe_allow_html=True,) - - st.markdown(''' - # Tutorial \n - - ''' - ) - original_title = '
🕹️ W- 5 Fingers  🕹️ A- 2 or 3 Fingers
' - st.markdown(original_title, unsafe_allow_html=True) - original_title = '
🕹️ S- Fist       🕹️ D- 4 Fingers
' - st.markdown(original_title, unsafe_allow_html=True) - # st.subheader('''W - 5 Fingers''') - # st.subheader('S - Fist\n A - 2 or 3 Fingers\n D - 4 Fingers') - st.image('wsad.jpg',width=200) - original_title = '
*NOTE
' - st.markdown(original_title, unsafe_allow_html=True) - original_title= '''
-    Video Option will Experience Lag in  Browsers.
-    If It's Lagging just Reload & Choose your option ASAP 
-    eg: Choosing Max Hands or Using Webcam. 
-    Webcam Will Take about 20 Seconds to Load
-    
-    Update :
-    1) I discovered that you can't use the Webcam option online,
-    because it would try to access the hosting server's camera, which I don't own.
-    
-    2) Hand Marks are not showing online + Video freezes
-    
-    Solution :
-    Go to main Streamlit WebApp Code & Run it Locally by typing
-    streamlit run st2.py
-    
''' - # st.markdown('''Video Option will Experience **Lag** in **Browsers**. If It's **Lagging** just **Reload** & Choose your option ASAP eg: **Choosing Max Hands** or **Using Webcam**. Webcam Will Take about **20 Seconds** to Load ''') - st.markdown(original_title, unsafe_allow_html=True) - -# ---------------------------------------------------------------------- - -elif app_mode == 'Run on Image': - drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1) - st.sidebar.markdown ('---------' ) - - st.markdown( - """ - - """,unsafe_allow_html=True,) - - # st.markdown("**Detected Hands**") - st.header("** Detected Hands **") - kpi1_text = st.markdown("0") - - max_hands= st.sidebar.number_input('Maximum Number of Hands',value=2,min_value=1,max_value=4) - # st.sidebar('---') - detection_confidence= st.sidebar.slider('Detection Confidence',min_value=0.0,max_value=1.0,value=0.5) - st.sidebar.markdown('---') - IMAGE_FILE=[] - count=0 - - img_file_buffer = st.sidebar.file_uploader("Upload an Image", type=["jpg","jpeg", "png"]) - if img_file_buffer is not None: - file_bytes = np.asarray(bytearray(img_file_buffer.read()), dtype=np.uint8) - opencv_image = cv2.imdecode(file_bytes, 1) - image = opencv_image.copy() - - else: - demo_image= DEMO_IMAGE - - image = 'demo.jpg' - cap = cv2.imread('demo.jpg', cv2.IMREAD_UNCHANGED) - image = cap.copy() - - # st.sidebar.text('Input Image') - st.sidebar.subheader('Input Image') - image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB) - st.sidebar.image(image) - st.sidebar.subheader('Demo Images') - - st.sidebar.image('dddemo.jpg') - st.sidebar.image('Screenshot 2022-01-09 161732.png') - st.sidebar.image('woman-showing-four-fingers-white-background-woman-showing-four-fingers-white-background-closeup-hand-134504006.jpg') - - st.sidebar.image('demo.jpg') - hand_count =0 - image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR) - cap = cv2.imread('demo.jpg', cv2.IMREAD_UNCHANGED) - - with mp_hands.Hands( - static_image_mode=True, - max_num_hands=max_hands, - min_detection_confidence=detection_confidence) as hands: - - hand_count+=1 - - results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - - - try: - - age_height, image_width, _ = image.shape - annotated_image = image.copy() - lmList=[] - lmList2forModel=[] - for hand_landmarks in results.multi_hand_landmarks: - - myHands=results.multi_hand_landmarks[0] - - for id,lm in enumerate(myHands.landmark): - - h,w,c=image.shape - cx,cy=int(lm.x*w), int(lm.y*h) - lmList.append([id,cx,cy]) - lmList2forModel.append([cx,cy]) - fingers=[] - if lmList[tipIds[0]][1] < lmList[tipIds[0]-1][1]: - fingers.append(1) - - else: - fingers.append(0) - - - for id in range(1,5): - if lmList[tipIds[id]][2] < lmList[tipIds[id]-1][2]: - fingers.append(1) - - - else: - fingers.append(0) - - total= fingers.count(1) - - if total==5: - - original_title = '

Accelerate

' - st.markdown(original_title, unsafe_allow_html=True) - # st.markdown('---') - - - overlay = cv2.imread('istockphoto-1179377734-612x612.jpg') - overlay = cv2.resize(overlay,(100,100)) - x_offset = 80 - y_offset = 10 - x_end = x_offset + overlay.shape[1] - y_end = y_offset + overlay.shape[0] - annotated_image[y_offset:y_end,x_offset:x_end] = overlay - - if total==4: - # st.text('Right') - original_title = '

Right

' - st.markdown(original_title, unsafe_allow_html=True) - overlay = cv2.imread('istockphoto-1179377734-612x612 (4).jpg') - overlay = cv2.resize(overlay,(100,100)) - x_offset = 120 - y_offset = 50 - x_end = x_offset + overlay.shape[1] - y_end = y_offset + overlay.shape[0] - annotated_image[y_offset:y_end,x_offset:x_end] = overlay - - if total==2 or total==3: - # st.text('Left') - original_title = '

Left

' - st.markdown(original_title, unsafe_allow_html=True) - overlay = cv2.imread('istockphoto-1179377734-612x612 (1).jpg') - overlay = cv2.resize(overlay,(100,100)) - x_offset = 50 - y_offset = 50 - x_end = x_offset + overlay.shape[1] - y_end = y_offset + overlay.shape[0] - annotated_image[y_offset:y_end,x_offset:x_end] = overlay - - if total==0: - # st.text('Brake') - original_title = '

Brake

' - st.markdown(original_title, unsafe_allow_html=True) - overlay = cv2.imread('istockphoto-1179377734-612x612 (3).jpg') - overlay = cv2.resize(overlay,(100,100)) - x_offset = 50 - y_offset = 90 - x_end = x_offset + overlay.shape[1] - y_end = y_offset + overlay.shape[0] - annotated_image[y_offset:y_end,x_offset:x_end] = overlay - mp_drawing.draw_landmarks( - annotated_image, - hand_landmarks, - mp_hands.HAND_CONNECTIONS, - mp_draw.DrawingSpec(color=(0,0,255), thickness=2, circle_radius=2), - mp_draw.DrawingSpec(color=(0,255,0), thickness=2, circle_radius=2)) - kpi1_text.write(f"

{hand_count}

", unsafe_allow_html=True) - except TypeError: - original_title = '

Sorry, No Hand Found!!

' - st.markdown(original_title, unsafe_allow_html=True) - - kpi1_text.write(f"

{0}

", unsafe_allow_html=True) - - - st.subheader('Output Image') - annotated_image = cv2.cvtColor(annotated_image,cv2.COLOR_BGR2RGB) - st.image(annotated_image, use_column_width=False) - -# ---------------------------------------------------------------------- - -elif app_mode == 'Run On Video': - - st.set_option('deprecation.showfileUploaderEncoding',False) - use_webcam = st.sidebar.button('Use Webcam') - record= st.sidebar.checkbox("Record Video") - - if record: - st.checkbox("Recording",value=True) - - - st.markdown( - """ - - """,unsafe_allow_html=True,) - - - max_hands= st.sidebar.number_input('Maximum Number of Hand',value=1,min_value=1,max_value=4) - detection_confidence= st.sidebar.slider('Detection Confidence',min_value=0.0,max_value=1.0,value=0.5) - tracking_confidence= st.sidebar.slider('Tracking Confidence Confidence',min_value=0.0,max_value=1.0,value=0.5) - st.sidebar.markdown('---') - - st.subheader("Input Video") - - stframe = st.empty() - video_file_buffer = st.sidebar.file_uploader("Upload a Video", type=['mp4', 'mov', 'avi', 'asf', 'm4v']) - tffile = tempfile.NamedTemporaryFile(delete=False) - #We get our input video here - if not video_file_buffer: - if use_webcam: - vid = cv2.VideoCapture(0) - else: - vid = cv2.VideoCapture(DEMO_VIDEO) - tffile.name = DEMO_VIDEO - else: - tffile.write(video_file_buffer.read()) - vid = cv2.VideoCapture(tffile.name) - width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)) - height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps_input = int(vid.get(cv2.CAP_PROP_FPS)) - - #Recording Part - codec = cv2.VideoWriter_fourcc('V', 'P', '0','9') - out= cv2.VideoWriter('output.mp4',codec,fps_input,(width,height)) - - st.sidebar.text('Input Video') - st.sidebar.video(tffile.name) - - fps = 0 - i = 0 - drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1) - kpi1, kpi2, kpi3 = st.columns(3) - with kpi1: - original_title = '

Frame Rate

' - st.markdown(original_title, unsafe_allow_html=True) - kpi1_text = st.markdown ("0") - with kpi2: - original_title = '

Detected Hands

' - st.markdown(original_title, unsafe_allow_html=True) - kpi2_text = st.markdown ("0") - with kpi3: - original_title = '

Video Width

' - st.markdown(original_title, unsafe_allow_html=True) - kpi3_text = st.markdown("0") - st.markdown ("
", unsafe_allow_html=True) - st.subheader('Reload , if webpage hangs') - drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) - - with mp_hand.Hands(max_num_hands=max_hands,min_detection_confidence=detection_confidence, - min_tracking_confidence=tracking_confidence) as hands: - - - while vid.isOpened(): - - i +=1 - ret, image = vid.read() - if not ret: - continue - - - image.flags.writeable=False - results= hands.process(image) - image.flags.writeable=True - image= cv2.cvtColor(image,cv2.COLOR_RGB2BGR) - - lmList=[] - lmList2forModel=[] - hand_count=0 - if results.multi_hand_landmarks: - - for hand_landmark in results.multi_hand_landmarks: - hand_count += 1 - myHands=results.multi_hand_landmarks[0] - for id,lm in enumerate(myHands.landmark): - h,w,c=image.shape - cx,cy=int(lm.x*w), int(lm.y*h) - lmList.append([id,cx,cy]) - lmList2forModel.append([cx,cy]) - - if lmList[tipIds[0]][1] > lmList[tipIds[0]-1][1]: - fingers.append(1) - - else: - fingers.append(0) - - - for id in range(1,5): - if lmList[tipIds[id]][2] < lmList[tipIds[id]-1][2]: - fingers.append(1) - - - else: - fingers.append(0) - - total= fingers.count(1) - if total==5: - sh= "Acclerate" - draw(sh) - if total==2 or total==3: - sh= "Left" - draw(sh) - if total==4: - sh= "Right" - draw(sh) - if total==0: - sh= "Brake" - draw(sh) - - mp_draw.draw_landmarks(image,hand_landmark,mp_hand.HAND_CONNECTIONS, - mp_draw.DrawingSpec(color=(0,0,255), thickness=2, circle_radius=2), - mp_draw.DrawingSpec(color=(0,255,0), thickness=2, circle_radius=2)) - - #FPS Counter Logic - currTime = time.time() - fps = 1/ (currTime - prevTime) - prevTime = currTime - fingers=[] - - if record: - out.write(image) - image= cv2.cvtColor(image,cv2.COLOR_BGR2RGB) - kpi1_text.write(f"

{int(fps)}

", unsafe_allow_html=True) - kpi2_text.write(f"

{hand_count}

", unsafe_allow_html=True) - - kpi3_text.write(f"

{width}

", unsafe_allow_html=True) - - image = cv2.resize(image, (0,0), fx = 0.8, fy =0.8) - image = image_resize(image = image, width = 320,height=360) - stframe.image(image, channels = 'BGR', use_column_width=False) - st.subheader('Output Image') - st.text('Video Processed') - output_video = open('output1.mp4','rb') - out_bytes= output_video.read() - st.video(out_bytes) - - vid.release() - out.release() - -# ---------------------------------------------------------------------- - -elif app_mode == 'Show Code': - agree = st.checkbox('Show Only Game Code') - - if agree: - st.subheader('Game Code') - uuu12='''''' - - uuu13='''''' - - st.components.v1.html(uuu12,height=470,scrolling=True) - st.components.v1.html(uuu13,height=470,scrolling=True) - - - # code = '''def hello(): - # print("Helliiiiiiiio, Streamlit!")''' - # st.code(code, language='python') - - else: - st.subheader('Streamlit Code') - # 11898 - uuu= '''''' - - uuu2= '''''' - - uuu3='''''' - - uuu4='''''' - - uuu5='''''' - - uuu6='''''' - - uuu7='''''' - - uuu8='''''' - - uuu9='''''' - - uuu10='''''' - - uuu11='''''' - st.components.v1.html(uuu,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu2,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu3,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu4,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu5,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu6,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu7,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu8,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu9,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu10,width=1024,height=1000,scrolling=True) - st.components.v1.html(uuu11,width=1024,height=568,scrolling=True) - - - - - # pl= "def hel()" - # code = st_ace(language='python',theme='dracula',placeholder=pl) - # '''def hello(): - # print("Hello, Streamlit!")''' - # st.st_ace(code, language='python',theme='cobalt') - - - - - - - - \ No newline at end of file diff --git a/spaces/shinexyt/StaticDemo/README.md b/spaces/shinexyt/StaticDemo/README.md deleted file mode 100644 index d98d49d60c9cb16fefc45856551c5d7db862d726..0000000000000000000000000000000000000000 --- a/spaces/shinexyt/StaticDemo/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: StaticDemo -emoji: 🚀 -colorFrom: gray -colorTo: gray -sdk: static -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/shriarul5273/Yolov7/README.md b/spaces/shriarul5273/Yolov7/README.md deleted file mode 100644 index 07d9c1e3bb6158852cc2112fa8c3cd6b84f60eaa..0000000000000000000000000000000000000000 --- a/spaces/shriarul5273/Yolov7/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Yolov7 -emoji: 🚀 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.44.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bus Simulator Indonesia Mod APK 3.7 The Ultimate Bus Driving Game for Android Users.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bus Simulator Indonesia Mod APK 3.7 The Ultimate Bus Driving Game for Android Users.md deleted file mode 100644 index 
ec200ffcdb01b2b71e1c24905f35ee349ec02140..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bus Simulator Indonesia Mod APK 3.7 The Ultimate Bus Driving Game for Android Users.md +++ /dev/null @@ -1,96 +0,0 @@ - -

Bus Simulator Indonesia Mod APK Version 3.7: How to Download and Play

-

Do you love driving buses and exploring new places? If yes, then you should try Bus Simulator Indonesia, a popular simulation game that lets you experience the life of a bus driver in Indonesia. In this game, you can choose from various types of buses, customize them with your own design, and drive them on realistic roads and cities in Indonesia. You can also complete missions, earn rewards, and interact with other players online.

-

bus simulator indonesia mod apk version 3.7 bus simulator indonesia mod apk download


Download ——— https://ssurll.com/2uNXUh



-

But what if you want to enjoy more features and benefits in the game? Well, you can do that by using Bus Simulator Indonesia Mod APK Version 3.7, a modified version of the game that gives you unlimited money, unlocked buses, free shopping, and more. In this article, we will tell you everything you need to know about Bus Simulator Indonesia Mod APK Version 3.7, including how to download and install it on your Android device, how to play it, and some tips and tricks to make the most out of it.

-

What is Bus Simulator Indonesia?

-

Bus Simulator Indonesia is a simulation game developed by Maleo, an Indonesian game studio. It was released in 2017 and has since gained millions of downloads and positive reviews from players around the world. The game is also known as BUSSID among fans.

-

Features of Bus Simulator Indonesia

-

Some of the features that make Bus Simulator Indonesia stand out from other simulation games are:

-
    -
  • It has authentic Indonesian buses, such as Transjakarta, Kopaja, Angkot, etc.
  • -
  • It has realistic Indonesian environments, such as Jakarta, Surabaya, Bali, etc.
  • -
  • It has a user-friendly interface and easy controls.
  • -
  • It has a custom livery editor that allows you to create your own bus design.
  • -
  • It has a multiplayer mode that lets you chat and compete with other players online.
  • -
  • It has regular updates that add new features and improvements to the game.
  • -
-

Why use Bus Simulator Indonesia Mod APK?

-

While Bus Simulator Indonesia is a fun and enjoyable game, it also has some limitations that may affect your gaming experience. For example, you need to spend real money to buy coins or diamonds in the game, which are used to buy new buses or upgrade existing ones. You also need to complete certain missions or achievements to unlock new features or locations in the game.

-


-

If you want to bypass these limitations and have more freedom and flexibility in the game, you can use Bus Simulator Indonesia Mod APK Version 3.7. This is a modified version of the game that gives you access to unlimited money, unlocked buses, free shopping, no ads, and more. With these advantages, you can enjoy the game without any restrictions or interruptions.

-

How to download and install Bus Simulator Indonesia Mod APK Version 3.7

-

If you are interested in downloading and installing Bus Simulator Indonesia Mod APK Version 3.7 on your Android device, you need to follow some simple steps. But before that, you need to make sure that your device meets some requirements.

-

Requirements for Bus Simulator Indonesia Mod APK Version 3.7

-

The requirements for Bus Simulator Indonesia Mod APK Version 3.7 are:

-
    -
  • Your device should have Android 4.2 or higher version.
  • -
  • Your device should have at least 2 GB of RAM and 300 MB of free storage space.
  • -
  • Your device should allow installation of apps from unknown sources. You can enable this option by going to Settings > Security > Unknown Sources and turning it on.
  • -
-

Steps to download and install Bus Simulator Indonesia Mod APK Version 3.7

-

Once you have checked the requirements, you can follow these steps to download and install Bus Simulator Indonesia Mod APK Version 3.7 on your device:

-
    -
  1. Go to the link and download the Bus Simulator Indonesia Mod APK Version 3.7 file on your device.
  2. -
  3. Locate the downloaded file in your device's file manager and tap on it to start the installation process.
  4. -
  5. Follow the instructions on the screen and wait for the installation to complete.
  6. -
  7. Launch the game from your app drawer and enjoy playing Bus Simulator Indonesia Mod APK Version 3.7.
  8. -
-

How to play Bus Simulator Indonesia Mod APK Version 3.7

-

Playing Bus Simulator Indonesia Mod APK Version 3.7 is similar to playing the original game, but with more features and benefits. Here are some of the things you can do in the game:

-

Choose your bus and customize it

-

In Bus Simulator Indonesia Mod APK Version 3.7, you can choose from a variety of buses, such as mini buses, double deckers, articulated buses, etc. You can also customize your bus with your own design, using the custom livery editor. You can change the color, pattern, logo, and other details of your bus. You can also use the free shopping feature to buy new buses or upgrade existing ones without spending any money.

-

Drive your bus on realistic Indonesian roads and cities

-

In Bus Simulator Indonesia Mod APK Version 3.7, you can drive your bus on realistic roads and through Indonesian cities such as Jakarta, Surabaya, and Bali. Along the way you can explore landmarks such as monuments, bridges, and temples, drive in different weather and lighting conditions such as rain, fog, and night, and share the road with cars, trucks, motorcycles, and pedestrians.

-

Complete missions and earn rewards

-

In Bus Simulator Indonesia Mod APK Version 3.7, you can complete various missions, such as transporting passengers, delivering cargo, and racing other buses, and earn rewards for doing so. You also earn coins and diamonds by completing achievements or watching ads, and you can spend these currencies on new buses or upgrades to existing ones. With the mod's unlimited money feature, coins and diamonds are effectively unlimited.

-

Tips and tricks for Bus Simulator Indonesia Mod APK Version 3.7

-

To make your gaming experience more enjoyable and successful in Bus Simulator Indonesia Mod APK Version 3.7, here are some tips and tricks you can follow:

-

Use the map and GPS to navigate

-

In Bus Simulator Indonesia Mod APK Version 3.7, you can use the map and GPS to navigate the roads and cities of Indonesia. The map shows your current location, destination, route, distance, and time, and you can zoom in or out to see more detail. The GPS voice guidance also gives spoken directions on where to go.

-

Follow the traffic rules and avoid accidents

-

In Bus Simulator Indonesia Mod APK Version 3.7, you should follow the traffic rules and avoid accidents. Obey traffic lights, signs, signals, and speed limits, and avoid hitting other vehicles or pedestrians. Causing an accident or violating a traffic rule can cost you points or money in the game, and it can also damage your bus or injure your passengers.

-

Upgrade your bus and unlock new features

-

In Bus Simulator Indonesia Mod APK Version 3.7, you can upgrade your bus and unlock new features. Upgrading the engine, transmission, brakes, suspension, and tires improves your bus's performance, appearance, and handling, and completing missions or achievements unlocks extras such as the horn, lights, wipers, and doors. With the free shopping feature, you can buy or upgrade anything in the game without spending any money.

-

Conclusion

-

Bus Simulator Indonesia Mod APK Version 3.7 is a great simulation game that lets you experience the life of a bus driver in Indonesia. You can choose from various types of buses, customize them with your own designs, drive them on realistic Indonesian roads and cities, complete missions, earn rewards, and interact with other players online. The mod adds extras such as unlimited money, unlocked buses, free shopping, and no ads. To download and install it on your Android device, just follow the simple steps explained in this article. We hope you found this article helpful and informative; if you have any questions or feedback, please feel free to leave a comment below.

-

FAQs

-

Here are some of the frequently asked questions about Bus Simulator Indonesia Mod APK Version 3.7:

-
  1. Is Bus Simulator Indonesia Mod APK Version 3.7 safe to use?

    Yes, it is safe to use as long as you download it from a trusted source and scan it with an antivirus before installing it on your device. Be aware, however, that using a modded version of the game may violate the terms and conditions of the original game and may result in your account being banned or suspended.

  2. Is Bus Simulator Indonesia Mod APK Version 3.7 compatible with all Android devices?

    No, it may not be compatible with all Android devices. It requires Android 4.2 or higher, at least 2 GB of RAM, and 300 MB of free storage space to run smoothly. If your device does not meet these requirements, you may run into issues or errors while playing.

  3. How do I update Bus Simulator Indonesia Mod APK Version 3.7?

    Download the latest version of the modded file from the same source as the previous version and install it on your device. Back up your game data before updating to avoid losing any progress or settings.

  4. How do I uninstall Bus Simulator Indonesia Mod APK Version 3.7?

    Go to Settings > Apps > Bus Simulator Indonesia > Uninstall and confirm your action. You should also delete any leftover files or folders related to the game from your device's file manager (a command-line alternative is sketched after this FAQ).

  5. How do I contact the developer of Bus Simulator Indonesia Mod APK Version 3.7?

    Visit their official website or follow them on their social media accounts. You can also send them an email or leave a review on their app page.
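As a command-line alternative to the uninstall steps in the FAQ above, the app and its leftover OBB data can also be removed over adb. The package name used below is an assumption, so check the real name first with pm list packages.

adb shell pm list packages | grep -i bus                        # find the actual package name
adb uninstall com.maleo.bussimulatorid                          # assumed package name; replace with the real one
adb shell rm -rf /sdcard/Android/obb/com.maleo.bussimulatorid   # clear leftover OBB data for that package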

-
-
\ No newline at end of file diff --git a/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/README.md b/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/README.md deleted file mode 100644 index e89c9b3ea08691210046fbb9184bf8e44e88f29e..0000000000000000000000000000000000000000 --- a/spaces/sino72/Passenger_Reconization/deep_sort/deep_sort/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Deep Sort - -This is the implemention of deep sort with pytorch. \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/classification/demo_classification_afqmc_roberta_deepspeed.sh b/spaces/skf15963/summary/fengshen/examples/classification/demo_classification_afqmc_roberta_deepspeed.sh deleted file mode 100644 index 48b003940a960454912a62731e5aec3b9046a6df..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/classification/demo_classification_afqmc_roberta_deepspeed.sh +++ /dev/null @@ -1,90 +0,0 @@ -MODEL_NAME="IDEA-CCNL/Erlangshen-Roberta-110M-NLI" - -TEXTA_NAME=sentence1 -TEXTB_NAME=sentence2 -LABEL_NAME=label -ID_NAME=id - -BATCH_SIZE=32 -VAL_BATCH_SIZE=32 -ZERO_STAGE=1 -config_json="./ds_config.json" - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": $BATCH_SIZE, - "steps_per_print": 1000, - "gradient_clipping": 0.1, - "zero_optimization": { - "stage": ${ZERO_STAGE} - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json - -DATA_ARGS="\ - --dataset_name IDEA-CCNL/AFQMC \ - --train_batchsize $BATCH_SIZE \ - --valid_batchsize $VAL_BATCH_SIZE \ - --max_length 128 \ - --texta_name $TEXTA_NAME \ - --textb_name $TEXTB_NAME \ - --label_name $LABEL_NAME \ - --id_name $ID_NAME \ - " - -MODEL_ARGS="\ - --learning_rate 1e-5 \ - --weight_decay 1e-2 \ - --warmup_ratio 0.01 \ - --num_labels 2 \ - --model_type huggingface-auto \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_acc \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 0 \ - --save_weights_only True \ - --dirpath . \ - --filename model-{epoch:02d}-{val_acc:.4f} \ - " - - -TRAINER_ARGS="\ - --max_epochs 67 \ - --gpus 1 \ - --num_nodes 1 \ - --strategy deepspeed_stage_${ZERO_STAGE} \ - --gradient_clip_val 1.0 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 1.0 \ - --precision 16 \ - --default_root_dir . 
\ - " - -options=" \ - --pretrained_model_path $MODEL_NAME \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ - " - -python3 finetune_classification.py $options - diff --git a/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/face_detection/detection/__init__.py b/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/face_detection/detection/__init__.py deleted file mode 100644 index 1a6b0402dae864a3cc5dc2a90a412fd842a0efc7..0000000000000000000000000000000000000000 --- a/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/face_detection/detection/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .core import FaceDetector \ No newline at end of file diff --git a/spaces/skytnt/lyric-generator-ja/frontend/public/index.html b/spaces/skytnt/lyric-generator-ja/frontend/public/index.html deleted file mode 100644 index e152da8ae849c8e51d63b39935b874e5075f88b2..0000000000000000000000000000000000000000 --- a/spaces/skytnt/lyric-generator-ja/frontend/public/index.html +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - 日语歌词生成器 - - - - - - -
- - - diff --git a/spaces/songweig/rich-text-to-image/models/unet_2d_blocks.py b/spaces/songweig/rich-text-to-image/models/unet_2d_blocks.py deleted file mode 100644 index 22b6a844f66030e67537dce82d02560102659564..0000000000000000000000000000000000000000 --- a/spaces/songweig/rich-text-to-image/models/unet_2d_blocks.py +++ /dev/null @@ -1,3198 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any, Dict, Optional, Tuple - -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn - -from diffusers.utils import is_torch_version, logging -from diffusers.models.attention import AdaGroupNorm -from models.attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0 -from models.dual_transformer_2d import DualTransformer2DModel -from models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D -from models.transformer_2d import Transformer2DModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def get_down_block( - down_block_type, - num_layers, - in_channels, - out_channels, - temb_channels, - add_downsample, - resnet_eps, - resnet_act_fn, - transformer_layers_per_block=1, - num_attention_heads=None, - resnet_groups=None, - cross_attention_dim=None, - downsample_padding=None, - dual_cross_attention=False, - use_linear_projection=False, - only_cross_attention=False, - upcast_attention=False, - resnet_time_scale_shift="default", - resnet_skip_time_act=False, - resnet_out_scale_factor=1.0, - cross_attention_norm=None, - attention_head_dim=None, - downsample_type=None, -): - # If attn head dim is not defined, we default it to the number of heads - if attention_head_dim is None: - logger.warn( - f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}." 
- ) - attention_head_dim = num_attention_heads - - down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type - if down_block_type == "DownBlock2D": - return DownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif down_block_type == "ResnetDownsampleBlock2D": - return ResnetDownsampleBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - resnet_time_scale_shift=resnet_time_scale_shift, - skip_time_act=resnet_skip_time_act, - output_scale_factor=resnet_out_scale_factor, - ) - elif down_block_type == "AttnDownBlock2D": - if add_downsample is False: - downsample_type = None - else: - downsample_type = downsample_type or "conv" # default to 'conv' - return AttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - downsample_type=downsample_type, - ) - elif down_block_type == "CrossAttnDownBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") - return CrossAttnDownBlock2D( - num_layers=num_layers, - transformer_layers_per_block=transformer_layers_per_block, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - cross_attention_dim=cross_attention_dim, - num_attention_heads=num_attention_heads, - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif down_block_type == "SimpleCrossAttnDownBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D") - return SimpleCrossAttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - cross_attention_dim=cross_attention_dim, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - skip_time_act=resnet_skip_time_act, - output_scale_factor=resnet_out_scale_factor, - only_cross_attention=only_cross_attention, - cross_attention_norm=cross_attention_norm, - ) - elif down_block_type == "SkipDownBlock2D": - return SkipDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - downsample_padding=downsample_padding, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - 
elif down_block_type == "AttnSkipDownBlock2D": - return AttnSkipDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif down_block_type == "DownEncoderBlock2D": - return DownEncoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif down_block_type == "AttnDownEncoderBlock2D": - return AttnDownEncoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif down_block_type == "KDownBlock2D": - return KDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - ) - elif down_block_type == "KCrossAttnDownBlock2D": - return KCrossAttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - cross_attention_dim=cross_attention_dim, - attention_head_dim=attention_head_dim, - add_self_attention=True if not add_downsample else False, - ) - raise ValueError(f"{down_block_type} does not exist.") - - -def get_up_block( - up_block_type, - num_layers, - in_channels, - out_channels, - prev_output_channel, - temb_channels, - add_upsample, - resnet_eps, - resnet_act_fn, - transformer_layers_per_block=1, - num_attention_heads=None, - resnet_groups=None, - cross_attention_dim=None, - dual_cross_attention=False, - use_linear_projection=False, - only_cross_attention=False, - upcast_attention=False, - resnet_time_scale_shift="default", - resnet_skip_time_act=False, - resnet_out_scale_factor=1.0, - cross_attention_norm=None, - attention_head_dim=None, - upsample_type=None, -): - # If attn head dim is not defined, we default it to the number of heads - if attention_head_dim is None: - logger.warn( - f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}." 
- ) - attention_head_dim = num_attention_heads - - up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type - if up_block_type == "UpBlock2D": - return UpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif up_block_type == "ResnetUpsampleBlock2D": - return ResnetUpsampleBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - resnet_time_scale_shift=resnet_time_scale_shift, - skip_time_act=resnet_skip_time_act, - output_scale_factor=resnet_out_scale_factor, - ) - elif up_block_type == "CrossAttnUpBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") - return CrossAttnUpBlock2D( - num_layers=num_layers, - transformer_layers_per_block=transformer_layers_per_block, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - cross_attention_dim=cross_attention_dim, - num_attention_heads=num_attention_heads, - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif up_block_type == "SimpleCrossAttnUpBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D") - return SimpleCrossAttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - cross_attention_dim=cross_attention_dim, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - skip_time_act=resnet_skip_time_act, - output_scale_factor=resnet_out_scale_factor, - only_cross_attention=only_cross_attention, - cross_attention_norm=cross_attention_norm, - ) - elif up_block_type == "AttnUpBlock2D": - if add_upsample is False: - upsample_type = None - else: - upsample_type = upsample_type or "conv" # default to 'conv' - - return AttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - upsample_type=upsample_type, - ) - elif up_block_type == "SkipUpBlock2D": - return SkipUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - 
resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif up_block_type == "AttnSkipUpBlock2D": - return AttnSkipUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif up_block_type == "UpDecoderBlock2D": - return UpDecoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - resnet_time_scale_shift=resnet_time_scale_shift, - temb_channels=temb_channels, - ) - elif up_block_type == "AttnUpDecoderBlock2D": - return AttnUpDecoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - attention_head_dim=attention_head_dim, - resnet_time_scale_shift=resnet_time_scale_shift, - temb_channels=temb_channels, - ) - elif up_block_type == "KUpBlock2D": - return KUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - ) - elif up_block_type == "KCrossAttnUpBlock2D": - return KCrossAttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - cross_attention_dim=cross_attention_dim, - attention_head_dim=attention_head_dim, - ) - - raise ValueError(f"{up_block_type} does not exist.") - - -class UNetMidBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", # default, spatial - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - add_attention: bool = True, - attention_head_dim=1, - output_scale_factor=1.0, - ): - super().__init__() - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - self.add_attention = add_attention - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - attentions = [] - - if attention_head_dim is None: - logger.warn( - f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." 
- ) - attention_head_dim = in_channels - - for _ in range(num_layers): - if self.add_attention: - attentions.append( - Attention( - in_channels, - heads=in_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups if resnet_time_scale_shift == "default" else None, - spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - else: - attentions.append(None) - - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - def forward(self, hidden_states, temb=None): - hidden_states = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - if attn is not None: - hidden_states = attn(hidden_states, temb=temb) - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - return hidden_states - - -class UNetMidBlock2DCrossAttn(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - transformer_layers_per_block: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - num_attention_heads=1, - output_scale_factor=1.0, - cross_attention_dim=1280, - dual_cross_attention=False, - use_linear_projection=False, - upcast_attention=False, - ): - super().__init__() - - self.has_cross_attention = True - self.num_attention_heads = num_attention_heads - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - attentions = [] - - for _ in range(num_layers): - if not dual_cross_attention: - attentions.append( - Transformer2DModel( - num_attention_heads, - in_channels // num_attention_heads, - in_channels=in_channels, - num_layers=transformer_layers_per_block, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - use_linear_projection=use_linear_projection, - upcast_attention=upcast_attention, - ) - ) - else: - attentions.append( - DualTransformer2DModel( - num_attention_heads, - in_channels // num_attention_heads, - in_channels=in_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = 
nn.ModuleList(resnets) - - def forward( - self, - hidden_states: torch.FloatTensor, - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ) -> torch.FloatTensor: - # Rich-Text: ignore the features - hidden_states, _ = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - attention_mask=attention_mask, - encoder_attention_mask=encoder_attention_mask, - return_dict=False, - )[0] - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - return hidden_states - - -class UNetMidBlock2DSimpleCrossAttn(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=1.0, - cross_attention_dim=1280, - skip_time_act=False, - only_cross_attention=False, - cross_attention_norm=None, - ): - super().__init__() - - self.has_cross_attention = True - - self.attention_head_dim = attention_head_dim - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - - self.num_heads = in_channels // self.attention_head_dim - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - ) - ] - attentions = [] - - for _ in range(num_layers): - processor = ( - AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() - ) - - attentions.append( - Attention( - query_dim=in_channels, - cross_attention_dim=in_channels, - heads=self.num_heads, - dim_head=self.attention_head_dim, - added_kv_proj_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - bias=True, - upcast_softmax=True, - only_cross_attention=only_cross_attention, - cross_attention_norm=cross_attention_norm, - processor=processor, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - def forward( - self, - hidden_states: torch.FloatTensor, - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - - if attention_mask is None: - 
# if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. - mask = None if encoder_hidden_states is None else encoder_attention_mask - else: - # when attention_mask is defined: we don't even check for encoder_attention_mask. - # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. - # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. - # then we can simplify this whole if/else block to: - # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask - mask = attention_mask - - hidden_states = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - # attn - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=mask, - **cross_attention_kwargs, - ) - - # resnet - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - return hidden_states - - -class AttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=1.0, - downsample_padding=1, - downsample_type="conv", - ): - super().__init__() - resnets = [] - attentions = [] - self.downsample_type = downsample_type - - if attention_head_dim is None: - logger.warn( - f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." - ) - attention_head_dim = out_channels - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - Attention( - out_channels, - heads=out_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if downsample_type == "conv": - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - elif downsample_type == "resnet": - self.downsamplers = nn.ModuleList( - [ - ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - down=True, - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states, temb=None, upsample_size=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = 
attn(hidden_states) - output_states = output_states + (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - if self.downsample_type == "resnet": - hidden_states = downsampler(hidden_states, temb=temb) - else: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class CrossAttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - transformer_layers_per_block: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - num_attention_heads=1, - cross_attention_dim=1280, - output_scale_factor=1.0, - downsample_padding=1, - add_downsample=True, - dual_cross_attention=False, - use_linear_projection=False, - only_cross_attention=False, - upcast_attention=False, - ): - super().__init__() - resnets = [] - attentions = [] - - self.has_cross_attention = True - self.num_attention_heads = num_attention_heads - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - if not dual_cross_attention: - attentions.append( - Transformer2DModel( - num_attention_heads, - out_channels // num_attention_heads, - in_channels=out_channels, - num_layers=transformer_layers_per_block, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - ) - ) - else: - attentions.append( - DualTransformer2DModel( - num_attention_heads, - out_channels // num_attention_heads, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.FloatTensor, - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), - hidden_states, - temb, - **ckpt_kwargs, - ) - hidden_states = 
torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), - hidden_states, - encoder_hidden_states, - None, # timestep - None, # class_labels - cross_attention_kwargs, - attention_mask, - encoder_attention_mask, - **ckpt_kwargs, - )[0] - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - attention_mask=attention_mask, - encoder_attention_mask=encoder_attention_mask, - return_dict=False, - )[0] - - output_states = output_states + (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states = output_states + (hidden_states,) - - return hidden_states, output_states - - -class DownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - if is_torch_version(">=", "1.11.0"): - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, use_reentrant=False - ) - else: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - output_states = output_states + (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states = output_states + (hidden_states,) - - return hidden_states, output_states - - -class DownEncoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - 
in_channels=in_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states): - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb=None) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class AttnDownEncoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - attentions = [] - - if attention_head_dim is None: - logger.warn( - f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." - ) - attention_head_dim = out_channels - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - Attention( - out_channels, - heads=out_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states): - for resnet, attn in zip(self.resnets, self.attentions): - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb=None) - hidden_states = attn(hidden_states) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class AttnSkipDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=np.sqrt(2.0), - add_downsample=True, - ): - super().__init__() - self.attentions = nn.ModuleList([]) - self.resnets = nn.ModuleList([]) - - if attention_head_dim is None: - logger.warn( - f"It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `in_channels`: {out_channels}." - ) - attention_head_dim = out_channels - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - self.resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(in_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - self.attentions.append( - Attention( - out_channels, - heads=out_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=32, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - - if add_downsample: - self.resnet_down = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - down=True, - kernel="fir", - ) - self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) - self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) - else: - self.resnet_down = None - self.downsamplers = None - self.skip_conv = None - - def forward(self, hidden_states, temb=None, skip_sample=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - output_states += (hidden_states,) - - if self.downsamplers is not None: - hidden_states = self.resnet_down(hidden_states, temb) - for downsampler in self.downsamplers: - skip_sample = downsampler(skip_sample) - - hidden_states = self.skip_conv(skip_sample) + hidden_states - - output_states += (hidden_states,) - - return hidden_states, output_states, skip_sample - - -class SkipDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - output_scale_factor=np.sqrt(2.0), - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - self.resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(in_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - if add_downsample: - self.resnet_down = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - 
use_in_shortcut=True, - down=True, - kernel="fir", - ) - self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) - self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) - else: - self.resnet_down = None - self.downsamplers = None - self.skip_conv = None - - def forward(self, hidden_states, temb=None, skip_sample=None): - output_states = () - - for resnet in self.resnets: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - output_states += (hidden_states,) - - if self.downsamplers is not None: - hidden_states = self.resnet_down(hidden_states, temb) - for downsampler in self.downsamplers: - skip_sample = downsampler(skip_sample) - - hidden_states = self.skip_conv(skip_sample) + hidden_states - - output_states += (hidden_states,) - - return hidden_states, output_states, skip_sample - - -class ResnetDownsampleBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - skip_time_act=False, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - down=True, - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - if is_torch_version(">=", "1.11.0"): - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, use_reentrant=False - ) - else: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - output_states = output_states + (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states, temb) - - output_states = output_states + (hidden_states,) - - return hidden_states, output_states - - -class SimpleCrossAttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - 
resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - cross_attention_dim=1280, - output_scale_factor=1.0, - add_downsample=True, - skip_time_act=False, - only_cross_attention=False, - cross_attention_norm=None, - ): - super().__init__() - - self.has_cross_attention = True - - resnets = [] - attentions = [] - - self.attention_head_dim = attention_head_dim - self.num_heads = out_channels // self.attention_head_dim - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - ) - ) - - processor = ( - AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() - ) - - attentions.append( - Attention( - query_dim=out_channels, - cross_attention_dim=out_channels, - heads=self.num_heads, - dim_head=attention_head_dim, - added_kv_proj_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - bias=True, - upcast_softmax=True, - only_cross_attention=only_cross_attention, - cross_attention_norm=cross_attention_norm, - processor=processor, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - down=True, - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.FloatTensor, - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - output_states = () - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - - if attention_mask is None: - # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. - mask = None if encoder_hidden_states is None else encoder_attention_mask - else: - # when attention_mask is defined: we don't even check for encoder_attention_mask. - # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. - # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. 
- # then we can simplify this whole if/else block to: - # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask - mask = attention_mask - - for resnet, attn in zip(self.resnets, self.attentions): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), - hidden_states, - encoder_hidden_states, - mask, - cross_attention_kwargs, - )[0] - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=mask, - **cross_attention_kwargs, - ) - - output_states = output_states + (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states, temb) - - output_states = output_states + (hidden_states,) - - return hidden_states, output_states - - -class KDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 4, - resnet_eps: float = 1e-5, - resnet_act_fn: str = "gelu", - resnet_group_size: int = 32, - add_downsample=False, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - groups = in_channels // resnet_group_size - groups_out = out_channels // resnet_group_size - - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - dropout=dropout, - temb_channels=temb_channels, - groups=groups, - groups_out=groups_out, - eps=resnet_eps, - non_linearity=resnet_act_fn, - time_embedding_norm="ada_group", - conv_shortcut_bias=False, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - # YiYi's comments- might be able to use FirDownsample2D, look into details later - self.downsamplers = nn.ModuleList([KDownsample2D()]) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - if is_torch_version(">=", "1.11.0"): - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, use_reentrant=False - ) - else: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states, output_states - - -class KCrossAttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - cross_attention_dim: int, - dropout: float = 0.0, - num_layers: int = 4, - resnet_group_size: int = 32, - add_downsample=True, - attention_head_dim: int = 64, - 
add_self_attention: bool = False, - resnet_eps: float = 1e-5, - resnet_act_fn: str = "gelu", - ): - super().__init__() - resnets = [] - attentions = [] - - self.has_cross_attention = True - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - groups = in_channels // resnet_group_size - groups_out = out_channels // resnet_group_size - - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - dropout=dropout, - temb_channels=temb_channels, - groups=groups, - groups_out=groups_out, - eps=resnet_eps, - non_linearity=resnet_act_fn, - time_embedding_norm="ada_group", - conv_shortcut_bias=False, - ) - ) - attentions.append( - KAttentionBlock( - out_channels, - out_channels // attention_head_dim, - attention_head_dim, - cross_attention_dim=cross_attention_dim, - temb_channels=temb_channels, - attention_bias=True, - add_self_attention=add_self_attention, - cross_attention_norm="layer_norm", - group_size=resnet_group_size, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.attentions = nn.ModuleList(attentions) - - if add_downsample: - self.downsamplers = nn.ModuleList([KDownsample2D()]) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.FloatTensor, - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), - hidden_states, - temb, - **ckpt_kwargs, - ) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), - hidden_states, - encoder_hidden_states, - temb, - attention_mask, - cross_attention_kwargs, - encoder_attention_mask, - **ckpt_kwargs, - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - emb=temb, - attention_mask=attention_mask, - cross_attention_kwargs=cross_attention_kwargs, - encoder_attention_mask=encoder_attention_mask, - ) - - if self.downsamplers is None: - output_states += (None,) - else: - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states, output_states - - -class AttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=1.0, - upsample_type="conv", - ): - super().__init__() - resnets = [] - attentions = [] - - self.upsample_type = 
upsample_type - - if attention_head_dim is None: - logger.warning( - f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." - ) - attention_head_dim = out_channels - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - Attention( - out_channels, - heads=out_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if upsample_type == "conv": - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - elif upsample_type == "resnet": - self.upsamplers = nn.ModuleList( - [ - ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - up=True, - ) - ] - ) - else: - self.upsamplers = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): - for resnet, attn in zip(self.resnets, self.attentions): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - if self.upsample_type == "resnet": - hidden_states = upsampler(hidden_states, temb=temb) - else: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class CrossAttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - prev_output_channel: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - transformer_layers_per_block: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - num_attention_heads=1, - cross_attention_dim=1280, - output_scale_factor=1.0, - add_upsample=True, - dual_cross_attention=False, - use_linear_projection=False, - only_cross_attention=False, - upcast_attention=False, - ): - super().__init__() - resnets = [] - attentions = [] - - self.has_cross_attention = True - self.num_attention_heads = num_attention_heads - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, -
out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - if not dual_cross_attention: - attentions.append( - Transformer2DModel( - num_attention_heads, - out_channels // num_attention_heads, - in_channels=out_channels, - num_layers=transformer_layers_per_block, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - ) - ) - else: - attentions.append( - DualTransformer2DModel( - num_attention_heads, - out_channels // num_attention_heads, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.FloatTensor, - res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - upsample_size: Optional[int] = None, - attention_mask: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - for resnet, attn in zip(self.resnets, self.attentions): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), - hidden_states, - temb, - **ckpt_kwargs, - ) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), - hidden_states, - encoder_hidden_states, - None, # timestep - None, # class_labels - cross_attention_kwargs, - attention_mask, - encoder_attention_mask, - **ckpt_kwargs, - )[0] - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - attention_mask=attention_mask, - encoder_attention_mask=encoder_attention_mask, - return_dict=False, - )[0] - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states - - -class UpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - 
resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - if is_torch_version(">=", "1.11.0"): - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, use_reentrant=False - ) - else: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states - - -class UpDecoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", # default, spatial - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - temb_channels=None, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - input_channels = in_channels if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=input_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states, temb=None): - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb=temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class AttnUpDecoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = 
"swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=1.0, - add_upsample=True, - temb_channels=None, - ): - super().__init__() - resnets = [] - attentions = [] - - if attention_head_dim is None: - logger.warning( - f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." - ) - attention_head_dim = out_channels - - for i in range(num_layers): - input_channels = in_channels if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=input_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - Attention( - out_channels, - heads=out_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups if resnet_time_scale_shift != "spatial" else None, - spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states, temb=None): - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb=temb) - hidden_states = attn(hidden_states, temb=temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class AttnSkipUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - attention_head_dim=1, - output_scale_factor=np.sqrt(2.0), - add_upsample=True, - ): - super().__init__() - self.attentions = nn.ModuleList([]) - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - self.resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min((resnet_in_channels + res_skip_channels) // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - if attention_head_dim is None: - logger.warning( - f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." 
- ) - attention_head_dim = out_channels - - self.attentions.append( - Attention( - out_channels, - heads=out_channels // attention_head_dim, - dim_head=attention_head_dim, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=32, - residual_connection=True, - bias=True, - upcast_softmax=True, - _from_deprecated_attn_block=True, - ) - ) - - self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) - if add_upsample: - self.resnet_up = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - up=True, - kernel="fir", - ) - self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - self.skip_norm = torch.nn.GroupNorm( - num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True - ) - self.act = nn.SiLU() - else: - self.resnet_up = None - self.skip_conv = None - self.skip_norm = None - self.act = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - hidden_states = self.attentions[0](hidden_states) - - if skip_sample is not None: - skip_sample = self.upsampler(skip_sample) - else: - skip_sample = 0 - - if self.resnet_up is not None: - skip_sample_states = self.skip_norm(hidden_states) - skip_sample_states = self.act(skip_sample_states) - skip_sample_states = self.skip_conv(skip_sample_states) - - skip_sample = skip_sample + skip_sample_states - - hidden_states = self.resnet_up(hidden_states, temb) - - return hidden_states, skip_sample - - -class SkipUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - output_scale_factor=np.sqrt(2.0), - add_upsample=True, - upsample_padding=1, - ): - super().__init__() - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - self.resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min((resnet_in_channels + res_skip_channels) // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) - if add_upsample: - self.resnet_up = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - 
groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - up=True, - kernel="fir", - ) - self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - self.skip_norm = torch.nn.GroupNorm( - num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True - ) - self.act = nn.SiLU() - else: - self.resnet_up = None - self.skip_conv = None - self.skip_norm = None - self.act = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - if skip_sample is not None: - skip_sample = self.upsampler(skip_sample) - else: - skip_sample = 0 - - if self.resnet_up is not None: - skip_sample_states = self.skip_norm(hidden_states) - skip_sample_states = self.act(skip_sample_states) - skip_sample_states = self.skip_conv(skip_sample_states) - - skip_sample = skip_sample + skip_sample_states - - hidden_states = self.resnet_up(hidden_states, temb) - - return hidden_states, skip_sample - - -class ResnetUpsampleBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - skip_time_act=False, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList( - [ - ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - up=True, - ) - ] - ) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return 
module(*inputs) - - return custom_forward - - if is_torch_version(">=", "1.11.0"): - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, use_reentrant=False - ) - else: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, temb) - - return hidden_states - - -class SimpleCrossAttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - prev_output_channel: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_head_dim=1, - cross_attention_dim=1280, - output_scale_factor=1.0, - add_upsample=True, - skip_time_act=False, - only_cross_attention=False, - cross_attention_norm=None, - ): - super().__init__() - resnets = [] - attentions = [] - - self.has_cross_attention = True - self.attention_head_dim = attention_head_dim - - self.num_heads = out_channels // self.attention_head_dim - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - ) - ) - - processor = ( - AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() - ) - - attentions.append( - Attention( - query_dim=out_channels, - cross_attention_dim=out_channels, - heads=self.num_heads, - dim_head=self.attention_head_dim, - added_kv_proj_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - bias=True, - upcast_softmax=True, - only_cross_attention=only_cross_attention, - cross_attention_norm=cross_attention_norm, - processor=processor, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList( - [ - ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - skip_time_act=skip_time_act, - up=True, - ) - ] - ) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.FloatTensor, - res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - upsample_size: Optional[int] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - - if 
attention_mask is None: - # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. - mask = None if encoder_hidden_states is None else encoder_attention_mask - else: - # when attention_mask is defined: we don't even check for encoder_attention_mask. - # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. - # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. - # then we can simplify this whole if/else block to: - # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask - mask = attention_mask - - for resnet, attn in zip(self.resnets, self.attentions): - # resnet - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), - hidden_states, - encoder_hidden_states, - mask, - cross_attention_kwargs, - )[0] - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=mask, - **cross_attention_kwargs, - ) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, temb) - - return hidden_states - - -class KUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 5, - resnet_eps: float = 1e-5, - resnet_act_fn: str = "gelu", - resnet_group_size: Optional[int] = 32, - add_upsample=True, - ): - super().__init__() - resnets = [] - k_in_channels = 2 * out_channels - k_out_channels = in_channels - num_layers = num_layers - 1 - - for i in range(num_layers): - in_channels = k_in_channels if i == 0 else out_channels - groups = in_channels // resnet_group_size - groups_out = out_channels // resnet_group_size - - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=k_out_channels if (i == num_layers - 1) else out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=groups, - groups_out=groups_out, - dropout=dropout, - non_linearity=resnet_act_fn, - time_embedding_norm="ada_group", - conv_shortcut_bias=False, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([KUpsample2D()]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): - res_hidden_states_tuple = res_hidden_states_tuple[-1] - if res_hidden_states_tuple is not None: - hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) - - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - if 
is_torch_version(">=", "1.11.0"): - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, use_reentrant=False - ) - else: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb - ) - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class KCrossAttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 4, - resnet_eps: float = 1e-5, - resnet_act_fn: str = "gelu", - resnet_group_size: int = 32, - attention_head_dim=1, # attention dim_head - cross_attention_dim: int = 768, - add_upsample: bool = True, - upcast_attention: bool = False, - ): - super().__init__() - resnets = [] - attentions = [] - - is_first_block = in_channels == out_channels == temb_channels - is_middle_block = in_channels != out_channels - add_self_attention = True if is_first_block else False - - self.has_cross_attention = True - self.attention_head_dim = attention_head_dim - - # in_channels, and out_channels for the block (k-unet) - k_in_channels = out_channels if is_first_block else 2 * out_channels - k_out_channels = in_channels - - num_layers = num_layers - 1 - - for i in range(num_layers): - in_channels = k_in_channels if i == 0 else out_channels - groups = in_channels // resnet_group_size - groups_out = out_channels // resnet_group_size - - if is_middle_block and (i == num_layers - 1): - conv_2d_out_channels = k_out_channels - else: - conv_2d_out_channels = None - - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - conv_2d_out_channels=conv_2d_out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=groups, - groups_out=groups_out, - dropout=dropout, - non_linearity=resnet_act_fn, - time_embedding_norm="ada_group", - conv_shortcut_bias=False, - ) - ) - attentions.append( - KAttentionBlock( - k_out_channels if (i == num_layers - 1) else out_channels, - k_out_channels // attention_head_dim - if (i == num_layers - 1) - else out_channels // attention_head_dim, - attention_head_dim, - cross_attention_dim=cross_attention_dim, - temb_channels=temb_channels, - attention_bias=True, - add_self_attention=add_self_attention, - cross_attention_norm="layer_norm", - upcast_attention=upcast_attention, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.attentions = nn.ModuleList(attentions) - - if add_upsample: - self.upsamplers = nn.ModuleList([KUpsample2D()]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.FloatTensor, - res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], - temb: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - upsample_size: Optional[int] = None, - attention_mask: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - res_hidden_states_tuple = res_hidden_states_tuple[-1] - if res_hidden_states_tuple is not None: - hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) - - for resnet, attn in zip(self.resnets, self.attentions): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, 
return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), - hidden_states, - temb, - **ckpt_kwargs, - ) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), - hidden_states, - encoder_hidden_states, - temb, - attention_mask, - cross_attention_kwargs, - encoder_attention_mask, - **ckpt_kwargs, - )[0] - else: - # Rich-Text: ignore the features - hidden_states, _ = resnet(hidden_states, temb) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - emb=temb, - attention_mask=attention_mask, - cross_attention_kwargs=cross_attention_kwargs, - encoder_attention_mask=encoder_attention_mask, - ) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -# can potentially later be renamed to `No-feed-forward` attention -class KAttentionBlock(nn.Module): - r""" - A basic Transformer block. - - Parameters: - dim (`int`): The number of channels in the input and output. - num_attention_heads (`int`): The number of heads to use for multi-head attention. - attention_head_dim (`int`): The number of channels in each head. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. - activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. - num_embeds_ada_norm (: - obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. - attention_bias (: - obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. - """ - - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - dropout: float = 0.0, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - upcast_attention: bool = False, - temb_channels: int = 768, # for ada_group_norm - add_self_attention: bool = False, - cross_attention_norm: Optional[str] = None, - group_size: int = 32, - ): - super().__init__() - self.add_self_attention = add_self_attention - - # 1. Self-Attn - if add_self_attention: - self.norm1 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) - self.attn1 = Attention( - query_dim=dim, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - cross_attention_dim=None, - cross_attention_norm=None, - ) - - # 2. 
Cross-Attn - self.norm2 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) - self.attn2 = Attention( - query_dim=dim, - cross_attention_dim=cross_attention_dim, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - upcast_attention=upcast_attention, - cross_attention_norm=cross_attention_norm, - ) - - def _to_3d(self, hidden_states, height, weight): - return hidden_states.permute(0, 2, 3, 1).reshape(hidden_states.shape[0], height * weight, -1) - - def _to_4d(self, hidden_states, height, weight): - return hidden_states.permute(0, 2, 1).reshape(hidden_states.shape[0], -1, height, weight) - - def forward( - self, - hidden_states: torch.FloatTensor, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - # TODO: mark emb as non-optional (self.norm2 requires it). - # requires assessing impact of change to positional param interface. - emb: Optional[torch.FloatTensor] = None, - attention_mask: Optional[torch.FloatTensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - ): - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - - # 1. Self-Attention - if self.add_self_attention: - norm_hidden_states = self.norm1(hidden_states, emb) - - height, weight = norm_hidden_states.shape[2:] - norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) - - attn_output = self.attn1( - norm_hidden_states, - encoder_hidden_states=None, - attention_mask=attention_mask, - **cross_attention_kwargs, - ) - attn_output = self._to_4d(attn_output, height, weight) - - hidden_states = attn_output + hidden_states - - # 2. Cross-Attention/None - norm_hidden_states = self.norm2(hidden_states, emb) - - height, weight = norm_hidden_states.shape[2:] - norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) - attn_output = self.attn2( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=attention_mask if encoder_hidden_states is None else encoder_attention_mask, - **cross_attention_kwargs, - ) - attn_output = self._to_4d(attn_output, height, weight) - - hidden_states = attn_output + hidden_states - - return hidden_states diff --git a/spaces/sparanoid/milky-green-svc/models.py b/spaces/sparanoid/milky-green-svc/models.py deleted file mode 100644 index 6efb5c541e1b2726ea4feb0973cd59f37ec1e0fd..0000000000000000000000000000000000000000 --- a/spaces/sparanoid/milky-green-svc/models.py +++ /dev/null @@ -1,556 +0,0 @@ -import math -import math - -import torch -from torch import nn -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn import functional as F -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import attentions -import commons -import modules -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = 
nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class PitchPredictor(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab # number of phonemes; differs between Chinese and English - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.pitch_net = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, 1, 1) - - def forward(self, x, x_mask): - pitch_embedding = self.pitch_net(x * x_mask, x_mask) - pitch_embedding = pitch_embedding * x_mask - pred_pitch = self.proj(pitch_embedding) - return pred_pitch, pitch_embedding - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - # self.emb = nn.Embedding(n_vocab, hidden_channels) - # nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - self.emb_pitch = nn.Embedding(256, hidden_channels) - nn.init.normal_(self.emb_pitch.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, pitch): - # x = x.transpose(1,2) - # x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - # print(x.shape) - x = x + self.emb_pitch(pitch) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - 
-class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, 
(kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - 
kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - # self.pitch_net = PitchPredictor(n_vocab, inter_channels, hidden_channels, filter_channels, n_heads, n_layers, - # kernel_size, p_dropout) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def infer(self, x, x_lengths, pitch, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, pitch) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - - w_ceil = w_ceil * 0 + 2 - # for index in range(w_ceil.shape[2]): - # if index%4 == 0: - # w_ceil[0,0,index] = 1.0 - - for i in range(w_ceil.shape[2]): - sep = 1 / 0.14 - if i * sep >= w_ceil.shape[2] * 2: - break - w_ceil[0, 0, int(i * sep / 2)] = 1 - - # print(w_ceil) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py deleted file mode 100644 index 5b147fe11f9d730438d036321a2d4a5d776efaa2..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from dataclasses import dataclass -from typing import Dict, List, Optional - -import torch -from fairseq.dataclass import FairseqDataclass -from fairseq.models import ( - FairseqIncrementalDecoder, - FairseqLanguageModel, - register_model, -) -from .adaptive_span_model import TransformerSeq as AdaptiveSpanTransformerModel - - -logger = logging.getLogger(__name__) - - -@dataclass -class AdaptiveSpanSmallConfig(FairseqDataclass): - # defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh - vocab_size: int = 50 - d_model: int = 256 - n_head: int = 4 - d_inner: int = 1024 - n_layer: int = 8 - attn_span: int = 1024 - dropout: float = 0.0 - emb_dropout: float = 0.0 - adapt_span_ramp: int = 32 - adapt_span_init: float = 0.0 - aux_loss_scaler: float = 0.000002 - adapt_span_layer: bool = False - - -@register_model("adaptive_span", dataclass=AdaptiveSpanSmallConfig) -class AdaptiveSpanTransformer(FairseqLanguageModel): - @classmethod - def build_model(cls, cfg: AdaptiveSpanSmallConfig, task): - return cls(AdaptiveSpanDecoder(cfg, task)) - - def get_aux_loss(self): - return self.decoder.get_aux_loss() - - def get_current_max_span(self): - return self.decoder.get_current_max_span() - - def get_current_avg_span(self): - return self.decoder.get_current_avg_span() - - -class AdaptiveSpanDecoder(FairseqIncrementalDecoder): - def __init__(self, cfg, task): - - super().__init__(task.target_dictionary) - - self.config = cfg - config = AdaptiveSpanSmallConfig( - vocab_size=len(task.target_dictionary), - d_model=cfg.d_model, - n_head=cfg.n_head, - d_inner=cfg.d_inner, - n_layer=cfg.n_layer, - attn_span=cfg.attn_span, - dropout=cfg.dropout, - emb_dropout=cfg.emb_dropout, - adapt_span_ramp=cfg.adapt_span_ramp, - adapt_span_init=cfg.adapt_span_init, - aux_loss_scaler=cfg.aux_loss_scaler, - adapt_span_layer=cfg.adapt_span_layer, - ) - logger.info(config) - self.model = AdaptiveSpanTransformerModel(**config.__dict__) - - self._mems = None - - def forward( - self, - src_tokens, - incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, - encoder_out=None, - ): - bsz = src_tokens.size(0) - if incremental_state is not None: # used during inference - mems = self.get_incremental_state("mems") - src_tokens = src_tokens[:, -1:] # only keep the most recent token - else: - mems = self._mems - - if mems is None: - # first time init - mems = self.init_hid_cache(bsz) - output = self.model(x=src_tokens, h_cache=mems,) - if incremental_state is not None: - self.set_incremental_state(incremental_state, "mems", output[1]) - else: - self._mems = output[1] - return (output[0],) - - def max_positions(self): - return self.config.attn_span - - def init_hid_cache(self, batch_sz): - hid = [] - for layer in self.model.layers: - param = next(self.model.parameters()) - h = torch.zeros( - batch_sz, - layer.get_cache_size(), - self.config.d_model, - dtype=param.dtype, - device=param.device, - ) - hid.append(h) - return hid - - def get_aux_loss(self): - return self.model.get_aux_loss() - - def get_current_max_span(self): - return self.model.get_current_max_span() - - def get_current_avg_span(self): - return self.model.get_current_avg_span() - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], - new_order: torch.Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. 
A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - raise NotImplementedError("This is required for generation/beam search") - # mems = self.get_incremental_state(incremental_state, "mems") - # if mems is not None: - # new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] - # self.set_incremental_state(incremental_state, "mems", new_mems) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py deleted file mode 100644 index 70d0016663b7d0b90033f4eb301b527f2c92a3f8..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os -import sys - -import soundfile as sf -import torch -import torchaudio - -from feature_utils import get_path_iterator, dump_feature - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_mfcc_feature") - - -class MfccFeatureReader(object): - def __init__(self, sample_rate): - self.sample_rate = sample_rate - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - assert sr == self.sample_rate, sr - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - if ref_len is not None and abs(ref_len - len(wav)) > 160: - logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, path, ref_len=None): - x = self.read_audio(path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float() - x = x.view(1, -1) - - mfccs = torchaudio.compliance.kaldi.mfcc( - waveform=x, - sample_frequency=self.sample_rate, - use_energy=False, - ) # (time, freq) - mfccs = mfccs.transpose(0, 1) # (freq, time) - deltas = torchaudio.functional.compute_deltas(mfccs) - ddeltas = torchaudio.functional.compute_deltas(deltas) - concat = torch.cat([mfccs, deltas, ddeltas], dim=0) - concat = concat.transpose(0, 1).contiguous() # (freq, time) - return concat - - -def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate): - reader = MfccFeatureReader(sample_rate) - generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) - dump_feature(reader, generator, num, split, nshard, rank, feat_dir) - - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("tsv_dir") - parser.add_argument("split") - parser.add_argument("nshard", type=int) - parser.add_argument("rank", type=int) - parser.add_argument("feat_dir") - parser.add_argument("--sample_rate", type=int, default=16000) - args = parser.parse_args() - logger.info(args) - - main(**vars(args)) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py deleted file mode 100644 index 
7970a3c71401b4835ba09158ea06134418afa065..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py +++ /dev/null @@ -1,1090 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from collections import namedtuple - -import torch -import torch.nn as nn -from fairseq import checkpoint_utils -from fairseq import utils -from fairseq.models import ( - FairseqEncoder, - FairseqDecoder, - FairseqEncoderDecoderModel, - register_model, - register_model_architecture, -) -from fairseq.models.fairseq_encoder import EncoderOut -from fairseq.models.speech_to_text import ( - TransformerDecoder, - S2TTransformerEncoder, -) -from fairseq.models.transformer import TransformerEncoder -from fairseq.modules import ( - TransformerEncoderLayer, - GradMultiply, - LayerNorm, -) - -logger = logging.getLogger(__name__) - - -class SpeechEoSEncoder(FairseqEncoder): - def __init__(self, encoder, eos_num, feat_dim, adapter_type="None", adapter_dim=0): - super().__init__(None) - self.encoder = encoder - self.eos_num = eos_num # downsampling rate for speech input feature - self.eos_emb = ( - nn.Parameter(torch.zeros(1, feat_dim), requires_grad=True) - if eos_num > 0 - else None - ) - self.adapter = self.add_adapter(adapter_type, adapter_dim) - - def add_adapter(self, adapter_type, adapter_dim): - def _make_identity(linear, eps=1e-5): - assert isinstance(linear, nn.Linear) - linear.weight.data.mul_(eps) - linear.weight.data.fill_diagonal_(1.0) - if linear.bias is not None: - linear.bias.data.mul_(eps) - - adapter = None - if adapter_type == "Linear": - assert adapter_dim > 0 - adapter = nn.Sequential( - nn.Linear(adapter_dim, adapter_dim), LayerNorm(adapter_dim) - ) - # initialize the adapter as identity matrix first - _make_identity(adapter[0]) - - elif adapter_type == "MLP": - assert adapter_dim > 0 - # assume the model is pre-norm model - adapter = nn.Sequential( - nn.Linear(adapter_dim, 2 * adapter_dim), - nn.ReLU(), - nn.Linear(2 * adapter_dim, adapter_dim), - LayerNorm(adapter_dim), - ) - _make_identity(adapter[0]) - _make_identity(adapter[2]) - return adapter - - def add_eos(self, src_tokens, src_lengths): - bsz, max_seq_len, fdim = src_tokens.size() - if self.eos_num > 0: - src_token_eos = torch.zeros( - [bsz, max_seq_len + self.eos_num, fdim], - dtype=src_tokens.dtype, - device=src_tokens.device, - ) - src_token_eos[:, :max_seq_len] = src_tokens - for bi in range(bsz): - src_token_eos[bi][ - src_lengths[bi] : src_lengths[bi] + self.eos_num - ] = self.eos_emb.expand(self.eos_num, fdim) - src_lengths = src_lengths + self.eos_num - src_tokens = src_token_eos - return src_tokens, src_lengths - - def apply_adapter(self, enc_out): - if self.adapter is None: - return enc_out - rst = self.adapter(enc_out.encoder_out) - if enc_out.encoder_padding_mask is not None: - rst.masked_fill_( - enc_out.encoder_padding_mask.transpose(0, 1).unsqueeze(-1), 0 - ) - return EncoderOut( - encoder_out=rst, - encoder_padding_mask=enc_out.encoder_padding_mask, - encoder_embedding=enc_out.encoder_embedding, - encoder_states=enc_out.encoder_states, - src_tokens=enc_out.src_tokens, - src_lengths=enc_out.src_lengths, - ) - - def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs): - """ - src_tokens: padded tensor (B, T, C * feat) - src_lengths: tensor 
of original lengths of input utterances (B,) - """ - src_tokens, src_lengths = self.add_eos(src_tokens, src_lengths) - enc_out = self.encoder(src_tokens, src_lengths, return_all_hiddens) - enc_out = self.apply_adapter(enc_out) - return enc_out - - def reorder_encoder_out(self, encoder_out, new_order): - return self.encoder.reorder_encoder_out(encoder_out, new_order) - - -class DualInputEncoder(FairseqEncoder): - def __init__( - self, - args, - spch_encoder, - text_encoder, - dictionary, - cross_attentive_loss_before_last_layer=-1, - ): - super().__init__(dictionary) - - self.spch_encoder = spch_encoder - self.text_encoder = text_encoder - self.enc_grad_mult = args.enc_grad_mult - self.cross_attentive_loss_before_last_layer = ( - cross_attentive_loss_before_last_layer - ) - self.use_cross_attentive_loss = ( - False if cross_attentive_loss_before_last_layer <= -1 else True - ) - self.enc2_along_grad_mult = args.enc2_along_grad_mult - - @classmethod - def set_shared_layer(cls, share_level, src_layer, tgt_layer): - """ - share parameters from tgt_layer to src_layer - share_level: - 0: share everything - 1: share everything but different model - 2: share weight but not bias, layernorm - """ - if share_level == 0: - return tgt_layer - if isinstance(src_layer, nn.Linear): - return tgt_layer - if isinstance(src_layer, TransformerEncoderLayer): - assert src_layer.embed_dim == tgt_layer.embed_dim - assert src_layer.normalize_before == tgt_layer.normalize_before - if share_level == 1: - src_layer.fc1 = tgt_layer.fc1 - src_layer.fc2 = tgt_layer.fc2 - src_layer.self_attn = tgt_layer.self_attn - src_layer.final_layer_norm = tgt_layer.final_layer_norm - src_layer.self_attn_layer_norm = tgt_layer.self_attn_layer_norm - src_layer.layernorm_embedding = tgt_layer.layernorm_embedding - else: - src_layer.fc1.weight = tgt_layer.fc1.weight - src_layer.fc2.weight = tgt_layer.fc2.weight - src_layer.self_attn.k_proj.weight = tgt_layer.self_attn.k_proj.weight - src_layer.self_attn.v_proj.weight = tgt_layer.self_attn.v_proj.weight - src_layer.self_attn.q_proj.weight = tgt_layer.self_attn.q_proj.weight - src_layer.self_attn.out_proj.weight = ( - tgt_layer.self_attn.out_proj.weight - ) - else: - if share_level == 1: - return tgt_layer - return src_layer - - @classmethod - def build_spch_encoder(cls, args): - cfg = { - "input_feat_per_channel": args.input_feat_per_channel, - "input_channels": args.input_channels, - "conv_kernel_sizes": args.conv_kernel_sizes, - "conv_channels": args.conv_channels, - "encoder_embed_dim": args.encoder_embed_dim, - "encoder_ffn_embed_dim": args.encoder_ffn_embed_dim, - "encoder_layers": args.speech_encoder_layers, - "encoder_layerdrop": args.encoder_layerdrop, - "encoder_attention_heads": args.encoder_attention_heads, - "max_source_positions": args.max_source_positions, - "dropout": args.dropout, - "encoder_normalize_before": args.encoder_normalize_before, - "activation_dropout": args.activation_dropout, - "attention_dropout": args.attention_dropout, - "activation_fn": args.activation_fn, - "layernorm_embedding": args.layernorm_embedding, - "no_token_positional_embeddings": args.no_token_positional_embeddings, - "no_scale_embedding": args.no_scale_embedding, - "quant_noise_pq": args.quant_noise_pq, - "encoder_freezing_updates": 0, - } - model_args = namedtuple("args", cfg.keys())(*cfg.values()) - spch_encoder = S2TTransformerEncoder(model_args) - if args.add_speech_eos: - spch_encoder = SpeechEoSEncoder( - spch_encoder, - 2 * len(args.conv_kernel_sizes.split(",")), - 
args.input_feat_per_channel, - adapter_type=getattr(args, "speech_encoder_adapter_type", "None"), - adapter_dim=args.encoder_embed_dim, - ) - return spch_encoder - - @classmethod - def build_text_encoder(cls, args, src_dictionary, spch_encoder): - if args.encoder_shared_layers > 0: - mx_shared_layers = ( - args.speech_encoder_layers - if args.speech_encoder_layers < args.text_encoder_layers - else args.text_encoder_layers - ) - args.encoder_shared_layers = ( - args.encoder_shared_layers - if args.encoder_shared_layers <= mx_shared_layers - else mx_shared_layers - ) - cfg = { - "encoder_embed_dim": args.encoder_text_embed_dim, - "encoder_ffn_embed_dim": args.encoder_ffn_embed_dim, - "encoder_layers": args.text_encoder_layers, - "encoder_layerdrop": args.encoder_layerdrop, - "encoder_attention_heads": args.encoder_attention_heads, - "encoder_learned_pos": args.encoder_learned_pos, - "max_source_positions": args.max_source_positions, - "dropout": args.dropout, - "encoder_normalize_before": args.encoder_normalize_before, - "activation_dropout": args.activation_dropout, - "attention_dropout": args.attention_dropout, - "activation_fn": args.activation_fn, - "adaptive_input": args.adaptive_input, - "no_token_positional_embeddings": args.no_token_positional_embeddings, - "no_scale_embedding": args.no_scale_embedding, - "quant_noise_pq": args.quant_noise_pq, - } - model_args = namedtuple("args", cfg.keys())(*cfg.values()) - enc_emb = nn.Embedding( - len(src_dictionary), model_args.encoder_embed_dim, src_dictionary.pad() - ) - text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb) - if args.add_speech_eos: - spch_encoder = spch_encoder.encoder - if args.encoder_shared_layers > 0: - text_encoder.layer_norm = cls.set_shared_layer( - args.encoder_shared_layer_level, - text_encoder.layer_norm, - spch_encoder.layer_norm, - ) - for i, ly in enumerate( - spch_encoder.transformer_layers[-args.encoder_shared_layers :] - ): - ly_id = i + args.text_encoder_layers - args.encoder_shared_layers - assert isinstance(text_encoder.layers[ly_id], type(ly)) - text_encoder.layers[ly_id] = cls.set_shared_layer( - args.encoder_shared_layer_level, - text_encoder.layers[ly_id], - ly, - ) - return text_encoder - - def mult_rst_grad(self, rst, ratio): - assert isinstance(rst, dict) # instead of EncoderOut - assert len(rst["encoder_out"]) == 1 - rst["encoder_out"][0] = GradMultiply.apply(rst["encoder_out"][0], ratio) - return rst - - def process_attentive_loss_states(self, rst, interstates): - assert isinstance(rst, dict) # instead of EncoderOut - rst["encoder_states"] = interstates - return rst - - def forward( - self, - src_tokens, - src_lengths=None, - src_txt_tokens=None, - src_txt_lengths=None, - **kwargs - ): - """ - Args: - src_tokens: padded tensor (B, T, C * feat) - src_lengths: tensor of original lengths of input utterances (speech) (B,) - src_txt_tokens: padded tensor (B, T) - src_txt_lengths: tensor of original lengths of input utterances (text) (B,) - """ - # src_tokens only: inference - # src_tokens, src_lengths: speech only training - # src_txt_tokens, src_txt_lengths: text only training - # all valid: speech + text training - - if src_tokens is None and src_txt_tokens is None: - raise ValueError( - "src_tokens and src_txt_tokens cannot be None at the same time" - ) - ret1 = None - ret2 = None - return_all_hiddens = False - if src_tokens is not None: - if ( - self.use_cross_attentive_loss and src_txt_tokens is not None - ): # remove self.training so we can get attn score during validation step - 
return_all_hiddens = True - ret1 = self.spch_encoder( - src_tokens, src_lengths, return_all_hiddens=return_all_hiddens - ) - - if self.use_cross_attentive_loss and src_txt_tokens is not None: - assert self.cross_attentive_loss_before_last_layer < len( - ret1["encoder_states"] - ) - ret1 = self.process_attentive_loss_states( - ret1, - ret1["encoder_states"][ - -self.cross_attentive_loss_before_last_layer - 1 - ], - ) - - if src_txt_tokens is not None: - ret2 = self.text_encoder( - src_txt_tokens, src_txt_lengths, return_all_hiddens=return_all_hiddens - ) - if return_all_hiddens: - if self.cross_attentive_loss_before_last_layer == len( - self.text_encoder.layers - ): - text_embedding, _ = self.text_encoder.forward_embedding( - src_txt_tokens - ) - text_embedding = text_embedding.transpose(0, 1) - ret2 = self.process_attentive_loss_states(ret2, text_embedding) - else: - assert self.cross_attentive_loss_before_last_layer < len( - self.text_encoder.layers - ) - ret2 = self.process_attentive_loss_states( - ret2, - ret2["encoder_states"][ - -self.cross_attentive_loss_before_last_layer - 1 - ], - ) - - def merge_output(rst1, rst2): - if rst1 is None: - if not (self.enc2_along_grad_mult == 1.0 or self.training): - rst2 = self.mult_rst_grad(rst2, self.enc2_along_grad_mult) - return rst2 - if rst2 is None: - return rst1 - if self.enc_grad_mult != 1.0 and self.training: - rst1 = self.mult_rst_grad(rst1, self.enc_grad_mult) - rst2 = self.mult_rst_grad(rst2, self.enc_grad_mult) - rst = (rst1, rst2) - return rst - - return merge_output(ret1, ret2) - - def reorder_encoder_out(self, encoder_out, new_order): - assert self.training is False # used for inference only - return self.spch_encoder.reorder_encoder_out(encoder_out, new_order) - - -# TransformerMultiInputDecoder: take one or two encoder inputs -class TransformerMultiInputDecoder(FairseqDecoder): - def __init__( - self, - dictionary, - spch_decoder, - text_decoder, - compute_cross_attentive_loss=False, - cross_attentive_loss_with_norm=True, - cross_attentive_loss_reverse=False, - ): - - super().__init__(dictionary) - self.spch_decoder = spch_decoder - self.text_decoder = text_decoder - self.compute_cross_attentive_loss = compute_cross_attentive_loss - self.cross_attentive_loss_with_norm = cross_attentive_loss_with_norm - self.cross_attentive_loss_reverse = cross_attentive_loss_reverse - - @classmethod - def share_spchdecoder(cls, task_args, text_decoder, spch_decoder): - if task_args.decoder_shared_layer_level == 0: - return text_decoder - assert text_decoder.embed_tokens == spch_decoder.embed_tokens - spch_decoder.project_in_dim = text_decoder.project_in_dim - spch_decoder.embed_positions = text_decoder.embed_positions - spch_decoder.layernorm_embedding = text_decoder.layernorm_embedding - spch_decoder.project_out_dim = text_decoder.project_out_dim - spch_decoder.adaptive_softmax = text_decoder.adaptive_softmax - if task_args.decoder_shared_layer_level == 1: - spch_decoder.output_projection = text_decoder.output_projection - spch_decoder.layer_norm = text_decoder.layer_norm - else: # 2 - spch_decoder.output_projection.weight = ( - text_decoder.output_projection.weight - ) - for i, ly in enumerate(text_decoder.layers): - sly = spch_decoder.layers[i] - sly.self_attn = ly.self_attn - sly.self_attn_layer_norm = ly.self_attn_layer_norm - # sly.encoder_attn = ly.encoder_attn - if ( - task_args.decoder_shared_layer_level == 1 - ): # share everything, but under different models - sly.encoder_attn = ly.encoder_attn - sly.encoder_attn_layer_norm = 
ly.encoder_attn_layer_norm - sly.fc1 = ly.fc1 - sly.fc2 = ly.fc2 - sly.final_layer_norm = ly.final_layer_norm - else: # task_args.decoder_shared_layer_level == 2: #separated encoder_attn_layer_norm and bias - sly.encoder_attn.k_proj.weight = ly.encoder_attn.k_proj.weight - sly.encoder_attn.v_proj.weight = ly.encoder_attn.v_proj.weight - sly.encoder_attn.q_proj.weight = ly.encoder_attn.q_proj.weight - sly.encoder_attn.out_proj.weight = ly.encoder_attn.out_proj.weight - sly.fc1.weight = ly.fc1.weight - sly.fc2.weight = ly.fc2.weight - - return spch_decoder - - def cross_attentive_loss( - self, teacher_states, student_states, teacher_masking, student_masking, eps=1e-6 - ): - x = teacher_states.transpose(0, 1) # from T X B X D to B X T X D - y = student_states.transpose(0, 1) - if self.cross_attentive_loss_with_norm: - x = x / (x.norm(dim=2, keepdim=True) + eps) - y = y / (y.norm(dim=2, keepdim=True) + eps) - dim = x.size(-1) - # lengths: batch X seqLen - sim_scores_xy = torch.bmm(x, y.transpose(1, 2)) # batch X lenx X leny ] - if y.dtype == torch.float16: - sim_scores_xy = sim_scores_xy.float() - y = y.float() - x = x.float() - if teacher_masking != []: - assert len(teacher_masking) == 1 - sim_scores_xy = sim_scores_xy.masked_fill( - teacher_masking[0].unsqueeze(-1), float("-inf") - ) - if student_masking != []: - sim_scores_xy = sim_scores_xy.masked_fill( - student_masking[0].unsqueeze(1), float("-inf") - ) - # do masking - y_weights = utils.softmax(sim_scores_xy, dim=-1) - if teacher_masking != []: - y_weights = y_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0) - x_reconstruct_from_y = torch.bmm(y_weights, y) - - sim_scores_xx = torch.bmm(x, x.transpose(1, 2)) # batch X lenx X lenx ] - x_weights = utils.softmax(sim_scores_xx, dim=-1) - if teacher_masking != []: - x_weights = x_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0) - - # no gradient for teacher state - x_reconstruct_from_x = torch.bmm(x_weights, x).detach() - cost = (x_reconstruct_from_x - x_reconstruct_from_y).norm(dim=2) - if teacher_masking != []: - cost = cost.masked_fill(teacher_masking[0], 0) - - if not self.cross_attentive_loss_with_norm: - cost = cost / dim - return cost - - def forward( - self, - prev_output_tokens, - encoder_out, - incremental_state=None, - has_txt_input=False, - **kwargs - ): - """ - Args: - prev_output_tokens (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for input feeding/teacher forcing. If there are - two or more input during training, they will share the same prev_output_tokens - encoder_out (tuple[Tensor]): output from the encoder, used for - encoder-side attention. It will be tuple if there are more inputs, but a tensor - if only one input - incremental_state ([dict]): dictionary used for storing state during - :ref:`Incremental decoding`. It is only valid for inference, only from single - input - Returns: - tuple: - - the last decoder layer's output of shape `(batch, tgt_len, - vocab)`. 
If there are N inputs, batch will be N bigger than a single input - - the last decoder layer's attention weights of shape `(batch, - tgt_len, src_len)` - """ - assert not isinstance(encoder_out, EncoderOut) - if isinstance(encoder_out, tuple): # training with mulitple input - rst = [] - assert len(encoder_out) == 2 - for i, eo in enumerate(encoder_out): - assert incremental_state is None - if i == 0: - rst.append( - self.spch_decoder(prev_output_tokens, eo, incremental_state) - ) - else: - rst.append( - self.text_decoder(prev_output_tokens, eo, incremental_state) - ) - dec_out = torch.cat([r[0] for r in rst], dim=0) - attn_cost = None - if self.compute_cross_attentive_loss: - assert isinstance(encoder_out[0], dict) - if self.cross_attentive_loss_reverse: - attn_cost = self.cross_attentive_loss( - teacher_states=encoder_out[1]["encoder_states"], # text_states - student_states=encoder_out[0]["encoder_states"], # spch_states - teacher_masking=encoder_out[1]["encoder_padding_mask"], - student_masking=encoder_out[0]["encoder_padding_mask"], - ) - else: - attn_cost = self.cross_attentive_loss( - teacher_states=encoder_out[0]["encoder_states"], # spch_states - student_states=encoder_out[1]["encoder_states"], # text_states - teacher_masking=encoder_out[0]["encoder_padding_mask"], - student_masking=encoder_out[1]["encoder_padding_mask"], - ) - - return (dec_out, {"attn_cost": attn_cost}) - else: # inference or training with one input - if has_txt_input: - return self.text_decoder( - prev_output_tokens, encoder_out, incremental_state - ) - return self.spch_decoder(prev_output_tokens, encoder_out, incremental_state) - - -# Note: -# dual input transformer: -# encoder: S2TTransformerEncoder for speech + TransformerEncoder for text -# decoder: TransformerDecoder for text -@register_model("dual_input_s2t_transformer") -class DualInputS2TTransformerModel(FairseqEncoderDecoderModel): - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - self.num_updates = 0 - - def max_positions(self): - return None # it is provided in task - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # encoder 1: S2TTransformerEncoder for speech - parser.add_argument( - "--conv-kernel-sizes", - type=str, - metavar="N", - help="kernel sizes of Conv1d subsampling layers", - ) - parser.add_argument( - "--conv-channels", - type=int, - metavar="N", - help="# of channels in Conv1d subsampling layers", - ) - parser.add_argument( - "--enc-output-dim", - type=int, - metavar="N", - help=""" - encoder output dimension, can be None. 
If specified, projecting the - transformer output to the specified dimension""", - ) - # standard Transformer - parser.add_argument( - "--activation-fn", - type=str, - default="relu", - choices=utils.get_available_activation_fns(), - help="activation function to use", - ) - parser.add_argument( - "--dropout", type=float, metavar="D", help="dropout probability" - ) - parser.add_argument( - "--attention-dropout", - type=float, - metavar="D", - help="dropout probability for attention weights", - ) - parser.add_argument( - "--activation-dropout", - "--relu-dropout", - type=float, - metavar="D", - help="dropout probability after activation in FFN.", - ) - parser.add_argument( - "--encoder-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension", - ) - parser.add_argument( - "--encoder-text-embed-dim", - type=int, - metavar="N", - help="encoder text embedding dimension", - ) - parser.add_argument( - "--encoder-ffn-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension for FFN", - ) - parser.add_argument( - "--encoder-attention-heads", - type=int, - metavar="N", - help="num encoder attention heads", - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-ffn-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension for FFN", - ) - parser.add_argument( - "--decoder-layers", type=int, metavar="N", help="num decoder layers" - ) - parser.add_argument( - "--decoder-attention-heads", - type=int, - metavar="N", - help="num decoder attention heads", - ) - parser.add_argument( - "--layernorm-embedding", - action="store_true", - help="add layernorm to embedding", - ) - parser.add_argument( - "--no-scale-embedding", - action="store_true", - help="if True, dont scale embeddings", - ) - # non-standard transformer parameters - parser.add_argument( - "--speech-encoder-layers", - type=int, - metavar="N", - help="num speech encoder layers", - ) - parser.add_argument( - "--text-encoder-layers", - type=int, - metavar="N", - help="num text encoder layers", - ) - parser.add_argument( - "--encoder-shared-layers", - type=int, - metavar="N", - help="num shared encoder layers", - ) - parser.add_argument( - "--encoder-shared-layer-level", - type=int, - metavar="N", - default=0, - choices=[0, 1, 2], - help="share layer level 0: all share 1: all share with separate model 2: share weight but not bias and layernorm", - ) - - parser.add_argument( - "--decoder-shared-layer-level", - default=0, - choices=[0, 1, 2], - type=int, - metavar="N", - help="0: share everything; 1: share everything with different model 2: no share layer_norm and bias", - ) - ### - parser.add_argument( - "--text-input-cost-ratio", - type=float, - default=1.0, - metavar="V", - help="text input cost ratio relative to speech input cost", - ) - parser.add_argument( - "--init-scale", - type=float, - default=1.0, - metavar="V", - help="scale the initial weight by given factor", - ) - parser.add_argument( - "--enc-grad-mult", - type=float, - metavar="V", - default=1.0, - help="multiply enc1 and enc2 gradient by V", - ) - parser.add_argument( - "--enc2-along-grad-mult", - type=float, - metavar="V", - default=1.0, - help="multiply enc2 gradient by V if only enc2 is used", - ) - parser.add_argument( - "--load-pretrain-encoder", - type=str, - default="", - metavar="EXPR", - help=""" path to the pretrained encoder """, - ) - parser.add_argument( - "--load-pretrain-speech-encoder", - type=str, - default="", - 
metavar="EXPR", - help=""" path to the pretrained speech encoder """, - ) - parser.add_argument( - "--load-pretrain-text-encoder", - type=str, - default="", - metavar="EXPR", - help=""" path to the pretrained text encoder """, - ) - parser.add_argument( - "--load-pretrain-text-encoder-last", - type=str, - default="", - metavar="EXPR", - help=""" path to the pretrained text encoder """, - ) - parser.add_argument( - "--load-pretrain-decoder", - type=str, - metavar="EXPR", - default="", - help=""" path to the pretrained encoder """, - ) - parser.add_argument( - "--add-speech-eos", - action="store_true", - help="add eos token at the end of input feature", - ) - parser.add_argument( - "--speech-encoder-adapter-type", - type=str, - metavar="EXPR", - default="None", - choices=["None", "Linear", "MLP"], - help="add speech encoder adapter", - ) - - @classmethod - def build_encoder(cls, args, task): - spch_encoder = DualInputEncoder.build_spch_encoder(args) - text_encoder = DualInputEncoder.build_text_encoder( - args, task.src_dict, spch_encoder - ) - cross_attentive_loss_before_last_layer = ( - 0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1 - ) - encoder = DualInputEncoder( - args, - spch_encoder, - text_encoder, - task.src_dict, - cross_attentive_loss_before_last_layer, - ) - if args.init_scale != 1.0: - with torch.no_grad(): - for param in encoder.parameters(): - param.data.mul_(args.init_scale) - if args.load_pretrain_text_encoder != "": - checkpoint_utils.load_pretrained_component_from_model( - text_encoder, args.load_pretrain_text_encoder - ) - if args.load_pretrain_speech_encoder != "": - if hasattr(spch_encoder, "encoder"): - checkpoint_utils.load_pretrained_component_from_model( - spch_encoder.encoder, args.load_pretrain_speech_encoder - ) - else: - checkpoint_utils.load_pretrained_component_from_model( - spch_encoder, args.load_pretrain_speech_encoder - ) - if ( - args.load_pretrain_text_encoder_last != "" - ): # if share encoder, speech encoder parameters will be used. 
- # It provides a chance to use pre-trained mt encoder instead - checkpoint_utils.load_pretrained_component_from_model( - text_encoder, args.load_pretrain_text_encoder_last - ) - - if args.load_pretrain_encoder != "": - checkpoint_utils.load_pretrained_component_from_model( - encoder, args.load_pretrain_encoder - ) - return encoder - - @classmethod - def build_decoder(cls, args, task): - dec_cfg = { - "decoder_layerdrop": args.decoder_layerdrop, - "share_decoder_input_output_embed": args.share_decoder_input_output_embed, - "decoder_embed_dim": args.decoder_embed_dim, - "max_target_positions": args.max_target_positions, - "dropout": args.dropout, - "encoder_learned_pos": args.encoder_learned_pos, - "decoder_learned_pos": args.decoder_learned_pos, - "layernorm_embedding": args.layernorm_embedding, - "decoder_normalize_before": args.decoder_normalize_before, - "activation_dropout": args.activation_dropout, - "attention_dropout": args.attention_dropout, - "decoder_ffn_embed_dim": args.decoder_ffn_embed_dim, - "decoder_layers": args.decoder_layers, - "decoder_attention_heads": args.decoder_attention_heads, - "decoder_output_dim": args.decoder_embed_dim, - "no_scale_embedding": args.no_scale_embedding, - "adaptive_input": args.adaptive_input, - "quant_noise_pq": args.quant_noise_pq, - "adaptive_softmax_cutoff": args.adaptive_softmax_cutoff, - "tie_adaptive_weights": args.tie_adaptive_weights, - "no_token_positional_embeddings": args.no_token_positional_embeddings, - } - dec_cfg = namedtuple("args", dec_cfg.keys())(*dec_cfg.values()) - dec_emb = nn.Embedding( - len(task.target_dictionary), - args.decoder_embed_dim, - task.target_dictionary.pad(), - ) - compute_cross_attentive_loss = ( - True if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else False - ) - cross_attentive_loss_without_norm = getattr( - args, "attentive_cost_without_normalize", False - ) - cross_attentive_loss_reverse = ( - False # getattr(args, "attentive_cost_reverse", False) - ) - - text_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb) - spch_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb) - spch_decoder = TransformerMultiInputDecoder.share_spchdecoder( - args, text_decoder, spch_decoder - ) - decoder = TransformerMultiInputDecoder( - dictionary=task.target_dictionary, - spch_decoder=spch_decoder, - text_decoder=text_decoder, - compute_cross_attentive_loss=compute_cross_attentive_loss, - cross_attentive_loss_with_norm=True - if not cross_attentive_loss_without_norm - else False, - cross_attentive_loss_reverse=cross_attentive_loss_reverse, - ) - if args.init_scale != 1.0: - with torch.no_grad(): - for param in decoder.parameters(): - param.data.mul_(args.init_scale) - if args.load_pretrain_decoder != "": - try: - checkpoint_utils.load_pretrained_component_from_model( - decoder, args.load_pretrain_decoder - ) - except RuntimeError: - checkpoint_utils.load_pretrained_component_from_model( - decoder.text_decoder, args.load_pretrain_decoder - ) - if args.decoder_shared_layer_level > 0: - checkpoint_utils.load_pretrained_component_from_model( - decoder.spch_decoder, args.load_pretrain_decoder - ) - - return decoder - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted - # (in case there are any new ones) - dualinputs2ttransformer_base(args) - - encoder = cls.build_encoder(args, task) - decoder = cls.build_decoder(args, task) - return cls(encoder, decoder) - - def get_normalized_probs(self, 
net_output, log_probs, sample=None): - # net_output['encoder_out'] is a (B, T, D) tensor - lprobs = super().get_normalized_probs(net_output, log_probs, sample) - lprobs.batch_first = True - return lprobs - - def set_num_updates(self, num_updates): - """Set the number of parameters updates.""" - super().set_num_updates(num_updates) - self.num_updates = num_updates - - def forward( - self, - src_tokens, - src_lengths, - prev_output_tokens, - use_encoder_outputs=False, - src_txt_tokens=None, - src_txt_lengths=None, - mode="sup_speech", - **kwargs - ): - """ - Run the forward pass for an encoder-decoder model. - - First feed a batch of source tokens through the encoder. Then, feed the - encoder output and previous decoder outputs (i.e., teacher forcing) to - the decoder to produce the next outputs:: - - encoder_out = self.encoder(src_tokens, src_lengths) - return self.decoder(prev_output_tokens, encoder_out) - - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (LongTensor): source sentence lengths of shape `(batch)` - prev_output_tokens (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for teacher forcing - mode = 'sup_speech' or 'text' - - Returns: - tuple: - - the decoder's output of shape `(batch, tgt_len, vocab)` - - a dictionary with any model-specific outputs - """ - if mode == "text": - assert src_txt_tokens is None - src_txt_tokens = src_tokens - src_txt_lengths = src_lengths - src_tokens = None - src_lengths = None - encoder_out = self.encoder( - src_tokens, - src_lengths=src_lengths, - src_txt_tokens=src_txt_tokens, - src_txt_lengths=src_txt_lengths, - **kwargs - ) - has_txt_input = True if src_txt_tokens is not None else False - decoder_out = self.decoder( - prev_output_tokens, - encoder_out=encoder_out, - has_txt_input=has_txt_input, - **kwargs - ) - if use_encoder_outputs: - return decoder_out, encoder_out - return decoder_out - - -@register_model_architecture( - "dual_input_s2t_transformer", "dualinputs2ttransformer_base" -) -def dualinputs2ttransformer_base(args): - args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) - # Convolutional subsampler - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) - args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") - args.conv_channels = getattr(args, "conv_channels", 1024) - # Transformer - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_text_embed_dim = getattr( - args, "encoder_text_embed_dim", args.encoder_embed_dim - ) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", args.dropout) - args.activation_dropout = getattr(args, 
"activation_dropout", args.dropout) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.layernorm_embedding = getattr(args, "layernorm_embedding", False) - args.no_scale_embedding = getattr(args, "no_scale_embedding", False) - args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) - - args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10) - args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) - args.encoder_shared_layers = getattr(args, "encoder_shared_layers", 0) - args.decoder_layers = getattr(args, "decoder_layers", 6) - - args.add_speech_eos = getattr(args, "add_speech_eos", False) - - -@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_s") -def dualinputs2ttransformer_s(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.dropout = getattr(args, "dropout", 0.1) - args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 7) - args.text_encoder_layers = getattr(args, "text_encoder_layers", 7) - args.decoder_layers = getattr(args, "decoder_layers", 7) - dualinputs2ttransformer_base(args) - - -@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_m") -def dualinputs2ttransformer_m(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.dropout = getattr(args, "dropout", 0.15) - args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10) - args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) - args.decoder_layers = getattr(args, "decoder_layers", 6) - dualinputs2ttransformer_base(args) - - -@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_b") -def dualinputs2ttransformer_b(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) - args.dropout = getattr(args, "dropout", 0.15) - args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12) - args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) - args.decoder_layers = getattr(args, "decoder_layers", 6) - dualinputs2ttransformer_base(args) - - -@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_l") 
-def dualinputs2ttransformer_l(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.dropout = getattr(args, "dropout", 0.2) - args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12) - args.text_encoder_layers = getattr(args, "text_encoder_layers", 6) - args.decoder_layers = getattr(args, "decoder_layers", 6) - dualinputs2ttransformer_base(args) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/bleu_utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/bleu_utils.py deleted file mode 100644 index 75cc5272d367c4f3be98d698b512a529bdb2e4f5..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/bleu_utils.py +++ /dev/null @@ -1,166 +0,0 @@ -""" - -TODO: the code is take from Apache-2 Licensed NLTK: make sure we do this properly! - - -Copied over from nltk.tranlate.bleu_score. This code has two major changes: - - allows to turn off length/brevity penalty --- it has no sense for self-bleu, - - allows to use arithmetic instead of geometric mean -""" - -import math -import sys -from fractions import Fraction -import warnings -from collections import Counter -from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction - - -def corpus_bleu( - list_of_references, - hypotheses, - weights=(0.25, 0.25, 0.25, 0.25), - smoothing_function=None, - auto_reweigh=False, - averaging_mode="geometric", - no_length_penalty=False -): - """ - Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all - the hypotheses and their respective references. - - Instead of averaging the sentence level BLEU scores (i.e. marco-average - precision), the original BLEU metric (Papineni et al. 2002) accounts for - the micro-average precision (i.e. summing the numerators and denominators - for each hypothesis-reference(s) pairs before the division). - - >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', - ... 'ensures', 'that', 'the', 'military', 'always', - ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] - >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', - ... 'ensures', 'that', 'the', 'military', 'will', 'forever', - ... 'heed', 'Party', 'commands'] - >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', - ... 'guarantees', 'the', 'military', 'forces', 'always', - ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] - >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', - ... 'army', 'always', 'to', 'heed', 'the', 'directions', - ... 'of', 'the', 'party'] - - >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', - ... 'interested', 'in', 'world', 'history'] - >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', - ... 'because', 'he', 'read', 'the', 'book'] - - >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] - >>> hypotheses = [hyp1, hyp2] - >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS - 0.5920... 
- - The example below show that corpus_bleu() is different from averaging - sentence_bleu() for hypotheses - - >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1) - >>> score2 = sentence_bleu([ref2a], hyp2) - >>> (score1 + score2) / 2 # doctest: +ELLIPSIS - 0.6223... - - :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses - :type list_of_references: list(list(list(str))) - :param hypotheses: a list of hypothesis sentences - :type hypotheses: list(list(str)) - :param weights: weights for unigrams, bigrams, trigrams and so on - :type weights: list(float) - :param smoothing_function: - :type smoothing_function: SmoothingFunction - :param auto_reweigh: Option to re-normalize the weights uniformly. - :type auto_reweigh: bool - :return: The corpus-level BLEU score. - :rtype: float - """ - # Before proceeding to compute BLEU, perform sanity checks. - - p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches. - p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref. - hyp_lengths, ref_lengths = 0, 0 - - assert len(list_of_references) == len(hypotheses), ( - "The number of hypotheses and their reference(s) should be the " "same " - ) - - # Iterate through each hypothesis and their corresponding references. - for references, hypothesis in zip(list_of_references, hypotheses): - # For each order of ngram, calculate the numerator and - # denominator for the corpus-level modified precision. - for i, _ in enumerate(weights, start=1): - p_i = modified_precision(references, hypothesis, i) - p_numerators[i] += p_i.numerator - p_denominators[i] += p_i.denominator - - # Calculate the hypothesis length and the closest reference length. - # Adds them to the corpus-level hypothesis and reference counts. - hyp_len = len(hypothesis) - hyp_lengths += hyp_len - ref_lengths += closest_ref_length(references, hyp_len) - - # Calculate corpus-level brevity penalty. - if no_length_penalty and averaging_mode == 'geometric': - bp = 1.0 - elif no_length_penalty and averaging_mode == 'arithmetic': - bp = 0.0 - else: - assert not no_length_penalty - assert averaging_mode != 'arithmetic', 'Not sure how to apply length penalty when aurithmetic mode' - bp = brevity_penalty(ref_lengths, hyp_lengths) - - # Uniformly re-weighting based on maximum hypothesis lengths if largest - # order of n-grams < 4 and weights is set at default. - if auto_reweigh: - if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25): - weights = (1 / hyp_lengths,) * hyp_lengths - - # Collects the various precision values for the different ngram orders. - p_n = [ - Fraction(p_numerators[i], p_denominators[i], _normalize=False) - for i, _ in enumerate(weights, start=1) - ] - - # Returns 0 if there's no matching n-grams - # We only need to check for p_numerators[1] == 0, since if there's - # no unigrams, there won't be any higher order ngrams. - if p_numerators[1] == 0: - return 0 - - # If there's no smoothing, set use method0 from SmoothinFunction class. - if not smoothing_function: - smoothing_function = SmoothingFunction().method0 - # Smoothen the modified precision. - # Note: smoothing_function() may convert values into floats; - # it tries to retain the Fraction object as much as the - # smoothing method allows. 
- p_n = smoothing_function( - p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths - ) - - if averaging_mode == "geometric": - s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n)) - s = bp * math.exp(math.fsum(s)) - elif averaging_mode == "arithmetic": - s = (w_i * p_i for w_i, p_i in zip(weights, p_n)) - s = math.fsum(s) - - return s - - -def sentence_bleu( - references, - hypothesis, - weights=(0.25, 0.25, 0.25, 0.25), - smoothing_function=None, - auto_reweigh=False, - averaging_mode="geometric", - no_length_penalty=False -): - return corpus_bleu( - [references], [hypothesis], weights, smoothing_function, auto_reweigh, averaging_mode, no_length_penalty - ) \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/benchmark/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/benchmark/__init__.py deleted file mode 100644 index 0317d5c623778fe40b7bf07b77769cd10c243244..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/benchmark/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# import models/tasks to register them -from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/masked_lm.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/masked_lm.py deleted file mode 100644 index 279458f317ee258e393c4bf1879bb3c14a04ab51..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/masked_lm.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -import math -from omegaconf import II - -import torch -from fairseq import metrics, modules, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass - - -@dataclass -class MaskedLmConfig(FairseqDataclass): - tpu: bool = II("common.tpu") - - -@register_criterion("masked_lm", dataclass=MaskedLmConfig) -class MaskedLmLoss(FairseqCriterion): - """ - Implementation for the loss used in masked language model (MLM) training. - """ - - def __init__(self, cfg: MaskedLmConfig, task): - super().__init__(task) - self.tpu = cfg.tpu - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - masked_tokens = sample["target"].ne(self.padding_idx) - sample_size = masked_tokens.int().sum() - - # Rare: when all tokens are masked, project all tokens. - # We use torch.where to avoid device-to-host transfers, - # except on CPU where torch.where is not well supported - # (see github.com/pytorch/pytorch/issues/26247). 
- if self.tpu: - masked_tokens = None # always project all tokens on TPU - elif masked_tokens.device == torch.device("cpu"): - if not masked_tokens.any(): - masked_tokens = None - else: - masked_tokens = torch.where( - masked_tokens.any(), - masked_tokens, - masked_tokens.new([True]), - ) - - logits = model(**sample["net_input"], masked_tokens=masked_tokens)[0] - targets = model.get_targets(sample, [logits]) - if masked_tokens is not None: - targets = targets[masked_tokens] - - loss = modules.cross_entropy( - logits.view(-1, logits.size(-1)), - targets.view(-1), - reduction="sum", - ignore_index=self.padding_idx, - ) - - logging_output = { - "loss": loss if self.tpu else loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["nsentences"], - "sample_size": sample_size, - } - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return True diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/scoring/tokenizer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/scoring/tokenizer.py deleted file mode 100644 index 61cf6d4a7cc698258caad9f68f2e8559dd510eee..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/scoring/tokenizer.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unicodedata - -from fairseq.dataclass import ChoiceEnum - - -class EvaluationTokenizer(object): - """A generic evaluation-time tokenizer, which leverages built-in tokenizers - in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides - lowercasing, punctuation removal and character tokenization, which are - applied after sacreBLEU tokenization. - - Args: - tokenizer_type (str): the type of sacreBLEU tokenizer to apply. - lowercase (bool): lowercase the text. - punctuation_removal (bool): remove punctuation (based on unicode - category) from text. - character_tokenization (bool): tokenize the text to characters. 
- """ - - SPACE = chr(32) - SPACE_ESCAPE = chr(9601) - ALL_TOKENIZER_TYPES = ChoiceEnum(["none", "13a", "intl", "zh", "ja-mecab"]) - - def __init__( - self, - tokenizer_type: str = "13a", - lowercase: bool = False, - punctuation_removal: bool = False, - character_tokenization: bool = False, - ): - from sacrebleu.tokenizers import TOKENIZERS - - assert tokenizer_type in TOKENIZERS, f"{tokenizer_type}, {TOKENIZERS}" - self.lowercase = lowercase - self.punctuation_removal = punctuation_removal - self.character_tokenization = character_tokenization - self.tokenizer = TOKENIZERS[tokenizer_type] - - @classmethod - def remove_punctuation(cls, sent: str): - """Remove punctuation based on Unicode category.""" - return cls.SPACE.join( - t - for t in sent.split(cls.SPACE) - if not all(unicodedata.category(c)[0] == "P" for c in t) - ) - - def tokenize(self, sent: str): - tokenized = self.tokenizer()(sent) - - if self.punctuation_removal: - tokenized = self.remove_punctuation(tokenized) - - if self.character_tokenization: - tokenized = self.SPACE.join( - list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE)) - ) - - if self.lowercase: - tokenized = tokenized.lower() - - return tokenized diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Microsoft Office Access Database Engine 2007 64 Bit NEW.md b/spaces/stomexserde/gpt4-ui/Examples/Download Microsoft Office Access Database Engine 2007 64 Bit NEW.md deleted file mode 100644 index d7089f9b7d54fa3a45bee98b4452352e9195daf1..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Microsoft Office Access Database Engine 2007 64 Bit NEW.md +++ /dev/null @@ -1,143 +0,0 @@ - -

Download Microsoft Office Access Database Engine 2007 64 Bit

-

If you are looking for a way to work with data stored in Access database files (.mdb or .accdb) on a 64-bit system, you may need to download and install the Microsoft Office Access Database Engine 2007 64 Bit. This is a redistributable package that provides the necessary components and drivers for connecting to and manipulating data in Access databases using OLE DB or ODBC. In this article, we will explain what this package is, why you may need it, how to install it, how to use it, and what are some alternatives to it.

-

What is Microsoft Office Access Database Engine 2007 64 Bit?

-

Microsoft Office Access Database Engine 2007 64 Bit is a software component that allows other applications to access data stored in Access database files (.mdb or .accdb) using OLE DB or ODBC. OLE DB and ODBC are standard interfaces for communicating with different types of data sources, such as databases, spreadsheets, text files, etc. By installing this package, you can enable other applications, such as Excel, Word, PowerPoint, or your own custom programs, to read from and write to Access databases on a 64-bit system.

-

Download Microsoft Office Access Database Engine 2007 64 Bit


Download Zip ✔✔✔ https://urlgoal.com/2uIc5Z



-

This package is also known as the Microsoft ACE OLEDB Provider or the Microsoft Access Database Engine (ACE). It is a successor to the Microsoft Jet Database Engine (Jet), which was the default engine for Access databases until Access 2007. Jet was only available as a 32-bit component and could not run on a 64-bit system. Therefore, if you have a 64-bit system and want to work with Access databases, you need to install the ACE OLEDB Provider or the Access Database Engine.

-

Why use Microsoft Office Access Database Engine 2007 64 Bit?

-


-

There are several reasons why you may want to use the Microsoft Office Access Database Engine 2007 64 Bit on your system:

-
    -
  • You have a 64-bit system and want to work with data stored in Access database files (.mdb or .accdb).
  • -
  • You have a legacy application that uses Jet OLEDB Provider or Jet ODBC Driver to connect to Access databases and want to run it on a 64-bit system.
  • -
  • You want to create or modify Access databases using other applications, such as Excel, Word, PowerPoint, or your own custom programs.
  • -
  • You want to take advantage of the features and performance improvements of the ACE OLEDB Provider or the Access Database Engine over the Jet engine.
  • -
-

Some of the benefits of using the ACE OLEDB Provider or the Access Database Engine over the Jet engine are:

-
    -
  • Support for new data types, such as multivalued fields, attachments, calculated fields, etc.
  • -
  • Support for new file formats, such as .accdb and .accde.
  • -
  • Support for encryption and compression of database files.
  • -
  • Support for SharePoint integration and synchronization.
  • -
  • Better compatibility with SQL Server and other data sources.
  • -
  • Better security and reliability.

    How to install Microsoft Office Access Database Engine 2007 64 Bit?

    -

    If you want to install the Microsoft Office Access Database Engine 2007 64 Bit on your system, you need to follow these steps:

    -

    Check your system requirements

    -

    Before you download and install the package, you need to make sure that your system meets the minimum requirements for running it. According to the official documentation, these are the system requirements:

    -
      -
    • Operating system: Windows 10, Windows 8, Windows 7, Windows Vista, Windows Server 2019, Windows Server 2016, Windows Server 2012, Windows Server 2008 R2, or Windows Server 2008.
    • -
    • Processor: Intel or compatible x86 or x64 processor.
    • -
    • Memory: At least 256 MB of RAM.
    • -
    • Disk space: At least 50 MB of free disk space.
    • -
    • Other software: Microsoft Data Access Components (MDAC) 2.8 SP1 or later.
    • -
    -

    You also need to check if you have any other version of the Access Database Engine or the ACE OLEDB Provider installed on your system. If you do, you may need to uninstall it before installing the new version. You can check this by going to Control Panel > Programs and Features and looking for any entries related to Access Database Engine or ACE OLEDB Provider.

    -

    Download the redistributable package

    -

    Once you have verified that your system meets the requirements, you can download the redistributable package from the Microsoft Download Center. There are two versions of the package available: one for x86 systems and one for x64 systems. You need to download the version that matches your system architecture. If you are not sure whether your system is x86 or x64, you can check this by going to Control Panel > System and Security > System and looking for the System type information.
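If you prefer to check this from code, a short Python sketch like the one below (only an illustration, assuming a standard Python installation on Windows) reports both the machine architecture and the bitness of the running process. Note that the package you install has to match the bitness of the application that will load it; for example, 32-bit Office needs the x86 package even on 64-bit Windows.

```python
import platform
import struct

# OS-level architecture: typically "AMD64" on 64-bit Windows, "x86" on 32-bit.
print("Machine:", platform.machine())

# Bitness of the current process (32 or 64). The Access Database Engine you
# install must match the bitness of the application that will load it,
# not just the bitness of the operating system.
print("Process bitness:", struct.calcsize("P") * 8)
```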

    -

    -

    The file name of the package is AccessDatabaseEngine.exe for x86 systems and AccessDatabaseEngine_X64.exe for x64 systems. The file size is about 25 MB for both versions. You can save the file to any location on your system, such as your desktop or downloads folder.

    -

    Run the setup file

    -

    After you have downloaded the package, you can run the setup file by double-clicking on it or right-clicking on it and choosing Run as administrator. This will launch the installation wizard that will guide you through the process. You may need to accept the license agreement and choose a destination folder for the installation. The default folder is C:\Program Files\Microsoft Office\Office12\ for x86 systems and C:\Program Files (x86)\Microsoft Office\Office12\ for x64 systems. You can change this if you want, but make sure that you have enough disk space in the chosen folder.

    -

    The installation may take a few minutes to complete. You may see a progress bar and some messages on the screen. When the installation is finished, you may need to restart your system for the changes to take effect.

    -

    Verify the installation

    -

    To verify that the installation was successful, you can check whether the Access Database Engine or the ACE OLEDB Provider is registered on your system. You can do this with the OLE DB Data Link Properties tool, a graphical interface for creating and testing OLE DB connections. To open it, create an empty text file anywhere on your system, rename its extension from .txt to .udl, and double-click the file. This opens the Data Link Properties window, where you can define and test a connection.

    -

    In the Data Link Properties window, go to the Provider tab and select Microsoft Office 12.0 Access Database Engine OLE DB Provider from the list of providers. Click Next and, on the Connection tab, enter or browse for the Access database file (.mdb or .accdb) that you want to connect to. Click Test Connection to see if the connection is successful. If it is, you will see a message saying "Test connection succeeded". If not, you will see an error message with some details about what went wrong.

    -

    If you can connect to an Access database file using the OLE DB Data Link Properties tool, it means that the Access Database Engine or the ACE OLEDB Provider is installed and working properly on your system.
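If you have Python available, another quick check is to list the ODBC drivers that the redistributable registers alongside the OLE DB provider. This is only a sketch and assumes the third-party pyodbc package (installed with pip install pyodbc); the driver name filtered for below is the one the Access Database Engine normally registers.

```python
import pyodbc

# The redistributable installs an ODBC driver in addition to the ACE OLE DB
# provider. If the installation succeeded, it should appear in this list.
access_drivers = [d for d in pyodbc.drivers() if "Microsoft Access Driver" in d]

if access_drivers:
    print("Access ODBC driver(s) found:", access_drivers)
else:
    print("No Access ODBC driver found - the engine may not be installed, "
          "or its bitness may not match this Python process.")
```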

    How to use Microsoft Office Access Database Engine 2007 64 Bit?

    -

    After you have installed the Microsoft Office Access Database Engine 2007 64 Bit on your system, you can use it to connect to and manipulate data in Access database files (.mdb or .accdb) using OLE DB or ODBC. There are different ways to use the Access Database Engine or the ACE OLEDB Provider, depending on the type of application you are using and the purpose of your data access. Here are some common scenarios and examples of how to use the Access Database Engine or the ACE OLEDB Provider:

    -

    Connect to an Access database file

    -

    The first step to use the Access Database Engine or the ACE OLEDB Provider is to establish a connection to an Access database file. A connection is a link between your application and the data source that allows you to send and receive data. To create a connection, you need to specify some information about the data source, such as the file name, the location, the provider, and any security settings. This information is usually stored in a connection string, which is a text string that contains the parameters for the connection.

    -

    There are different ways to create a connection string, depending on the application you are using and the interface you are using. For example, if you are using Excel, you can use the Data Connection Wizard to create a connection string by following these steps:

    -
      -
    1. Open Excel and go to Data > Get Data > From Database > From Microsoft Access Database.
    2. -
    3. Browse for the Access database file (.mdb or .accdb) that you want to connect to and click Open.
    4. -
    5. Select the table or query that you want to import from the database and click Load.
    6. -
    7. Excel will create a connection string and import the data from the database into a worksheet.
    8. -
    -

    If you want to see or modify the connection string, you can go to Data > Queries & Connections and right-click on the query name and choose Properties. This will open a window where you can see and edit the connection string under the Definition tab.

    -

    A typical connection string for connecting to an Access database file using the ACE OLEDB Provider looks like this:

    - Provider=Microsoft.ACE.OLEDB.12.0;Data Source=C:\Users\user\Documents\Database1.accdb; -

    The Provider parameter specifies the name of the OLE DB provider that is used to access the data source. The Data Source parameter specifies the path and name of the Access database file. You can also add other parameters to the connection string, such as User ID, Password, Mode, Extended Properties, etc., depending on your security and configuration settings. You can find more information about the connection string parameters for the ACE OLEDB Provider here: https://docs.microsoft.com/en-us/office/client-developer/access/desktop-database-reference/ace-provider-oledb-connection-strings
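If you are connecting from your own program rather than from Excel, the same engine can also be reached through ODBC. The sketch below is only an illustration: it assumes the third-party pyodbc package and an example file path, and it uses the ODBC driver installed by the redistributable instead of the OLE DB provider string shown above.

```python
import pyodbc

# Example path - replace with the location of your own .mdb or .accdb file.
db_path = r"C:\Users\user\Documents\Database1.accdb"

# ODBC connection string for the driver installed by the Access Database Engine.
conn_str = (
    r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};"
    rf"DBQ={db_path};"
)

with pyodbc.connect(conn_str) as conn:
    cursor = conn.cursor()
    # List the user tables as a simple connectivity test.
    tables = [row.table_name for row in cursor.tables(tableType="TABLE")]
    print("Tables in the database:", tables)
```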

    -

    Perform queries and operations on the data

    -

    Once you have established a connection to an Access database file, you can perform various queries and operations on the data using SQL (Structured Query Language) statements. SQL is a standard language for accessing and manipulating data in relational databases, such as Access. You can use SQL statements to select, insert, update, delete, or join data from one or more tables or queries in the database.

    -

    There are different ways to execute SQL statements on an Access database file, depending on the application you are using and the interface you are using. For example, if you are using Excel, you can use Power Query Editor to execute SQL statements by following these steps:

    -
      -
    1. Open Excel and go to Data > Get Data > From Database > From Microsoft Access Database.
    2. -
    3. Browse for the Access database file (.mdb or .accdb) that you want to connect to and click Open.
    4. -
    5. Select any table or query from the database and click Transform Data.
    6. -
    7. This will open Power Query Editor, where you can see and edit the query that was created by Excel.
    8. -
    9. Go to Home > Advanced Editor and replace the query with your own SQL statement.
    10. -
    11. Click Done and then Close & Load.
    12. -
    13. Excel will execute your SQL statement and import the results into a worksheet.
    14. -
    -

    A typical SQL statement for selecting data from an Access database file looks like this:

    - SELECT * FROM Customers WHERE Country = 'USA'; -

    This statement selects all columns (*) from the Customers table where the Country column is equal to 'USA'. You can also use other clauses and operators in your SQL statement, such as WHERE, ORDER BY, GROUP BY, HAVING, JOIN, etc., depending on your query criteria and logic. You can find more information about the SQL syntax and examples for Access here: https://docs.microsoft.com/en-us/office/client-developer/access/desktop-database-reference/sql-data-manipulation-language
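Once a connection is open, the same statements can be sent from code. The following is a hedged sketch that again assumes pyodbc, the connection string from the previous example, and an example Customers table; the ? placeholders are filled in by the driver, which avoids the manual quoting issues described in the troubleshooting section below.

```python
import pyodbc

conn_str = (
    r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};"
    r"DBQ=C:\Users\user\Documents\Database1.accdb;"
)

with pyodbc.connect(conn_str) as conn:
    cursor = conn.cursor()

    # Parameterized SELECT: the driver substitutes the value for the ? marker.
    cursor.execute("SELECT * FROM Customers WHERE Country = ?", "USA")
    for row in cursor.fetchall():
        print(row)

    # Writes work the same way; remember to commit the change.
    cursor.execute(
        "INSERT INTO Customers (CustomerName, Country) VALUES (?, ?)",
        "Contoso", "USA",
    )
    conn.commit()
```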

    -

    Troubleshoot common issues and errors

    -

    While using the Microsoft Office Access Database Engine 2007 64 Bit, you may encounter some common issues and errors that may prevent you from connecting to or working with Access database files. Here are some of the possible causes and solutions for these issues and errors:

| Issue/Error | Cause | Solution |
| --- | --- | --- |
| The 'Microsoft.ACE.OLEDB.12.0' provider is not registered on the local machine. | The Access Database Engine or the ACE OLEDB Provider is not installed or registered on your system. | Download and install the Access Database Engine or the ACE OLEDB Provider from the Microsoft Download Center. Make sure you download the correct version (x86 or x64) for your system. |
| The requested operation requires an OLE DB Session object, which is not supported by the current provider. | You are trying to use a feature or function that is not supported by the Access Database Engine or the ACE OLEDB Provider, such as multiple active result sets (MARS) or table-valued parameters (TVPs). | Use a different feature or function that is supported by the Access Database Engine or the ACE OLEDB Provider, such as stored procedures or parameterized queries. |
| The Microsoft Office Access database engine cannot open or write to the file. It is already opened exclusively by another user, or you need permission to view and write its data. | The Access database file (.mdb or .accdb) that you are trying to connect to or modify is locked by another user or process, or you do not have the required permissions to access it. | Make sure that no other user or process is using the Access database file, or close any applications that may have opened it. Check the file properties and security settings and make sure that you have read and write permissions for the file. |
| The Microsoft Office Access database engine cannot find the input table or query. Make sure it exists and that its name is spelled correctly. | The table or query name that you are using in your SQL statement does not exist in the Access database file (.mdb or .accdb) that you are connecting to, or you have misspelled it. | Check the Access database file and make sure that the table or query name exists and matches the name that you are using in your SQL statement. Use double quotes around the table or query name if it contains spaces or special characters. |
| Data type mismatch in criteria expression. | The data type of a value that you are using in your SQL statement does not match the data type of the corresponding column in the Access database file (.mdb or .accdb) that you are connecting to, or you have used an invalid operator or function for the data type. | Check the data types of the columns and values that you are using in your SQL statement and make sure that they match. Use single quotes around text values, pound signs (#) around date values, and appropriate operators and functions for each data type. |
    -

    If you encounter any other issues or errors while using the Microsoft Office Access Database Engine 2007 64 Bit, you can search for solutions online using your preferred search engine, such as Bing. You can also visit the official support forums for Access here: https://answers.microsoft.com/en-us/msoffice/forum/msoffice_access
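When a connection fails from code rather than from a dialog, these conditions surface as ODBC errors that you can catch and inspect. A small sketch of that pattern, again assuming pyodbc; SQLSTATE "IM002" is the generic "driver not found" code, which usually corresponds to the first row of the table above (the engine is missing or its bitness does not match).

```python
import pyodbc

conn_str = (
    r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};"
    r"DBQ=C:\Users\user\Documents\Database1.accdb;"
)

try:
    conn = pyodbc.connect(conn_str)
except pyodbc.InterfaceError as exc:
    # SQLSTATE 'IM002': the Access ODBC driver is missing, or its bitness
    # (x86 vs x64) does not match the running process.
    print("Driver problem:", exc)
except pyodbc.Error as exc:
    # Other failures (locked file, bad path, missing permissions) land here.
    print("Connection failed:", exc)
else:
    print("Connected successfully.")
    conn.close()
```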

    -

    Alternatives to Microsoft Office Access Database Engine 2007 64 Bit

    -

    While the Microsoft Office Access Database Engine 2007 64 Bit is a useful and powerful tool for working with data stored in Access database files (.mdb or .accdb), it may not be suitable for everyone's needs and preferences. If you are looking for some alternatives to this tool, here are some options that you can consider:

    -

    LibreOffice Base

    -

    LibreOffice Base is a free and open source database management system that is part of the LibreOffice suite of applications. It allows you to create and manage databases, tables, queries, forms, reports, and macros using a graphical user interface or SQL. It also supports connecting to various external data sources, such as MySQL, PostgreSQL, Oracle, Firebird, HSQLDB, etc., using ODBC or JDBC drivers. You can also use LibreOffice Base to connect to and work with Access database files (.mdb or .accdb) using its built-in drivers or the UCanAccess driver. You can download LibreOffice Base from here: https://www.libreoffice.org/download/download/

    -

    Kexi

    -

    Kexi is another free and open source database management system that is part of the Calligra suite of applications. It is similar to LibreOffice Base in terms of features and functionality, but it has a more modern and user-friendly interface. It also supports connecting to various external data sources, such as MySQL, PostgreSQL, SQLite, etc., using ODBC or JDBC drivers. You can also use Kexi to connect to and work with Access database files (.mdb or .accdb) using the MDB Tools driver or the UCanAccess driver. You can download Kexi from here: https://kexi-project.org/download.html

    -

    Axisbase

    -

    Axisbase is a free and lightweight database management system that is designed to be easy to use and fast to deploy. It allows you to create and manage databases, tables, queries, forms, reports, and charts using a graphical user interface or SQL. It also supports connecting to various external data sources, such as Excel, CSV, XML, etc., using ODBC drivers. You can also use Axisbase to connect to and work with Access database files (.mdb or .accdb) using the MDB Tools driver or the UCanAccess driver. You can download Axisbase from here: http://www.axisbase.com/download.html

    -

    Conclusion

    -

    In this article, we have explained what the Microsoft Office Access Database Engine 2007 64 Bit is, why you may need it, how to install it, how to use it, and what are some alternatives to it. We hope that this article has helped you understand how to work with data stored in Access database files (.mdb or .accdb) on a 64-bit system using OLE DB or ODBC. If you have any questions or feedback, please feel free to leave a comment below.

    -

    FAQs

    -

    Here are some frequently asked questions and answers about the Microsoft Office Access Database Engine 2007 64 Bit:

    -
      -
    1. Q: Can I use the Microsoft Office Access Database Engine 2007 64 Bit on a 32-bit system?
    2. -
    3. A: Yes, you can use the Microsoft Office Access Database Engine 2007 64 Bit on a 32-bit system, as long as you download and install the x86 version of the package. However, you may not need it if you already have a compatible version of Microsoft Office or Microsoft Access installed on your system.
    4. -
    5. Q: Can I use the Microsoft Office Access Database Engine 2007 64 Bit with other versions of Microsoft Office or Microsoft Access?
    6. -
    7. A: Yes, you can use the Microsoft Office Access Database Engine 2007 64 Bit with other versions of Microsoft Office or Microsoft Access, such as Office 2010, Office 2013, Office 2016, Office 2019, or Office 365. However, you may not need it if you already have a compatible version of the Access Database Engine or the ACE OLEDB Provider installed on your system.
    8. -
    9. Q: Can I use the Microsoft Office Access Database Engine 2007 64 Bit with other types of database files, such as SQL Server, Oracle, MySQL, etc.?
    10. -
    11. A: No, you cannot use the Microsoft Office Access Database Engine 2007 64 Bit with other types of database files, such as SQL Server, Oracle, MySQL, etc. The Access Database Engine or the ACE OLEDB Provider only supports connecting to and manipulating data in Access database files (.mdb or .accdb). If you want to work with other types of database files, you need to use other tools or drivers that are compatible with them.
    12. -
    13. Q: Can I use the Microsoft Office Access Database Engine 2007 64 Bit with other programming languages or frameworks, such as C#, Java, Python, etc.?
    14. -
    15. A: Yes, you can use the Microsoft Office Access Database Engine 2007 64 Bit with other programming languages or frameworks, such as C#, Java, Python, etc., as long as they support using OLE DB or ODBC for data access. You can use the same connection string and SQL statements that you would use in other applications that use OLE DB or ODBC.
    16. -
    17. Q: Where can I find more information and resources about the Microsoft Office Access Database Engine 2007 64 Bit?
    18. -
    19. A: You can find more information and resources about the Microsoft Office Access Database Engine 2007 64 Bit here:
    20. -
        -
      • The official documentation: https://docs.microsoft.com/en-us/office/client-developer/access/desktop-database-reference/microsoft-access-database-engine-2007-redistributable-package
      • The official download page: https://www.microsoft.com/en-us/download/details.aspx?id=23734
      • The official support forums: https://answers.microsoft.com/en-us/msoffice/forum/msoffice_access
      • The official blog: https://techcommunity.microsoft.com/t5/access-blog/bg-p/AccessBlog
      • The official YouTube channel: https://www.youtube.com/channel/UCy1U85Q4mWycZL3wRmNXQwQ
        -
      -

    b2dd77e56b
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Evermotion Archmodels Vol.1 117 Torrent __HOT__.md b/spaces/stomexserde/gpt4-ui/Examples/Evermotion Archmodels Vol.1 117 Torrent __HOT__.md deleted file mode 100644 index 690e5b9a4b1d3e9abf054fd328607b043e5f11e1..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Evermotion Archmodels Vol.1 117 Torrent __HOT__.md +++ /dev/null @@ -1,21 +0,0 @@ - -I can try to write a title and an article with SEO optimization and HTML formatting for the keyword "Evermotion Archmodels Vol.1 117 Torrent". Here is what I came up with: - -

    Evermotion Archmodels Vol.1 117 Torrent: Download 56 Highly Detailed Models of Trees

    -

    If you are looking for realistic and detailed models of trees for your 3D projects, you might want to check out Evermotion Archmodels Vol.1 117 Torrent. This collection includes 56 high-quality models of various species of trees, ready to use with textures and shaders. You can also find 3 cover scenes with all models, materials, lighting and scene setup included.

    -

    Evermotion Archmodels Vol.1 117 Torrent


    DOWNLOAD 🆗 https://urlgoal.com/2uIc0m



    -

    Evermotion Archmodels Vol.1 117 Torrent is compatible with 3ds max 2009 and V-ray 1.5. You can render the models and scenes with stunning results using this software. The tree species are not specified, so you can use the models for any kind of environment and season. Whether you need a lush forest, a park, a garden, or a street, you can find the perfect tree model for your scene.

    -

    Evermotion Archmodels Vol.1 117 Torrent is available for download from various sources on the internet. You can use a torrent client to download the files quickly and safely. However, you should be aware of the legal and ethical issues of downloading copyrighted content without permission. You should always respect the work of the original creators and support them by purchasing their products if you can.

    -

    Evermotion Archmodels Vol.1 117 Torrent is a great resource for 3D artists, architects, designers, and hobbyists who want to create realistic and beautiful scenes with trees. You can save time and effort by using these ready-made models instead of creating them from scratch. You can also learn from the professional techniques and settings used by Evermotion team.

    -

    If you are interested in Evermotion Archmodels Vol.1 117 Torrent, you can find more information and previews on Evermotion website[^1^]. You can also browse other collections of Archmodels that cover different topics and themes such as furniture, buildings, vehicles, plants, animals, and more.

    -

    -

    Evermotion Archmodels Vol.1 117 Torrent is not the only collection of tree models that you can find online. There are other sources that offer similar or different types of models for free or for a fee. Some of them are:

    -
      -
    • 3D Warehouse: This is a platform where you can find and share 3D models of various categories, including trees. You can download the models in different formats and use them in your projects. You can also upload your own models and share them with the community.
    • -
    • CGTrader: This is a marketplace where you can buy and sell 3D models of various categories, including trees. You can find high-quality models with realistic textures and shaders for different software and renderers. You can also sell your own models and earn money.
    • -
    • Turbosquid: This is another marketplace where you can buy and sell 3D models of various categories, including trees. You can find a wide range of models with different styles and levels of detail for different software and renderers. You can also sell your own models and earn money.
    • -
    -

    These are some of the alternatives that you can explore if you are looking for tree models for your 3D projects. However, you should always check the license and terms of use of each model before downloading or using it. You should also respect the intellectual property rights of the original creators and avoid any illegal or unethical actions.

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Iar Embedded Workbench For Msp430 5.51 Crack !!LINK!!.md b/spaces/stomexserde/gpt4-ui/Examples/Iar Embedded Workbench For Msp430 5.51 Crack !!LINK!!.md deleted file mode 100644 index 4419bd6f96192b808931e003485b1edbc5ebbb6b..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Iar Embedded Workbench For Msp430 5.51 Crack !!LINK!!.md +++ /dev/null @@ -1,27 +0,0 @@ -
    -

    How to Use IAR Embedded Workbench for MSP430 5.51

    -

    IAR Embedded Workbench for MSP430 is a complete development toolchain for MSP430 microcontrollers. It provides a user-friendly integrated development environment (IDE), a powerful C and C++ compiler, a comprehensive debugger, and integrated code analysis tools. In this article, we will show you how to use IAR Embedded Workbench for MSP430 5.51 to create, build, and debug a simple project for an MSP430 device.

    -

    Step 1: Install IAR Embedded Workbench for MSP430 5.51

    -

    You can download the latest version of IAR Embedded Workbench for MSP430 from the IAR website[^1^]. The installation process is straightforward and you can follow the instructions on the screen. You will need a license to use the full version of the toolchain, but you can also use the free Kickstart edition that supports up to 16 KB of code size. You can order the Kickstart edition from the Texas Instruments website[^2^].

    -

    iar embedded workbench for msp430 5.51 crack


    Download File 🌟 https://urlgoal.com/2uIbdx



    -

    Step 2: Create a New Project

    -

    After installing IAR Embedded Workbench for MSP430, you can launch it from the Start menu or the desktop shortcut. To create a new project, select File > New > Project... from the menu bar. A dialog box will appear where you can choose a project template and a device. For this example, we will use the Blink LED template and the MSP430F5529 device. Click OK to create the project.

    -

    The project will contain a main.c file with some sample code that toggles an LED on the device. You can view and edit the code in the editor window. You can also explore the project options by right-clicking on the project name in the Workspace window and selecting Options.... Here you can configure various settings such as compiler options, linker options, debugger options, etc.

    -

    Step 3: Build and Download the Project

    -

    To build the project, select Project > Make from the menu bar or press F7. The compiler will compile the source files and generate an output file in TI's msp430-txt format[^1^]. You can view the build messages in the Build window at the bottom of the IDE.

    -

    To download the project to the device, you need to connect a debug probe to your PC and to your device. IAR Embedded Workbench for MSP430 supports various debug probes such as TI's MSP-FET[^3^], Elprotronic's XStream-Iso[^1^], etc. You can select your debug probe in the project options under Debugger > Setup > Driver.

    -

    Once you have connected your debug probe, select Project > Download and Debug from the menu bar or press Ctrl + D. The debugger will download the output file to the device and start a debugging session. You can view and control the debugging process in the C-SPY Debugger window that opens up.

    -

    Step 4: Debug and Analyze the Project

    -

    In the C-SPY Debugger window, you can use various tools to debug and analyze your project. For example, you can:

    -

    -
      -
    • Use the toolbar buttons or keyboard shortcuts to run, pause, step, or reset your program.
    • -
    • Use breakpoints, watchpoints, or tracepoints to stop or monitor your program at specific locations or conditions.
    • -
    • Use windows such as Disassembly, Registers, Memory, Call Stack, etc. to inspect and modify various aspects of your program state.
    • -
    • Use windows such as Power Debugging[^1^], ULP Advisor[^1^] [^2^], C-STAT[^1^], etc. to measure and optimize your power consumption, code quality, and compliance with coding standards.
    • -
    -

    You can also customize your debugging environment by adding or removing windows, changing their layout, setting preferences, etc.

    -

    Conclusion

    -

    In this article, we have shown you how to use IAR Embedded Workbench for MSP430 5.51 to create, build, and debug a simple project for an MSP430 device. We have also introduced some of the features and tools that IAR Embedded Workbench for MSP430 offers to help you develop high-quality and low-power applications for MSP430 microcontrollers.

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Jovan Deretic Istorija Srba Pdf !!TOP!! Download.md b/spaces/stomexserde/gpt4-ui/Examples/Jovan Deretic Istorija Srba Pdf !!TOP!! Download.md deleted file mode 100644 index ff04e1ff62ec138e817b30a72e987ee648726b14..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Jovan Deretic Istorija Srba Pdf !!TOP!! Download.md +++ /dev/null @@ -1,17 +0,0 @@ -
    -

    Who is Jovan Deretic and what is his book Istorija srpske knjizevnosti?

    -

    Jovan Deretic is a Serbian writer and historian who has written several books on the history and culture of the Serbian people. One of his most famous works is Istorija srpske knjizevnosti (History of Serbian Literature), which was first published in 1983 and has been reprinted several times since then.

    -

    jovan deretic istorija srba pdf download


    Download Zip · https://urlgoal.com/2uI76T



    -

    Istorija srpske knjizevnosti is a comprehensive overview of the development of Serbian literature from its origins in the medieval period to the modern times. It covers various genres, styles, movements, authors, and works that have shaped the Serbian literary tradition. It also explores the historical, social, political, and religious contexts that influenced the literary production and reception of Serbian writers.

    -

    The book is divided into six parts: Old Literature, Oral Forms of Creation, From Old to New Literature (Baroque Tendencies), Enlightenment and the Beginnings of New Literature, Classical Edition of Folk Epic, and Pre-Romanticism (Literature of Vuk's Time). Each part consists of several chapters that provide detailed analyses of specific literary phenomena and examples. The book also includes a preface, a bibliography, an index of names, and an index of terms.

    -

    Istorija srpske knjizevnosti is considered to be one of the most authoritative and comprehensive studies of Serbian literature ever written. It is widely used as a reference and a textbook by students, scholars, and general readers who are interested in learning more about the rich and diverse literary heritage of the Serbian people.

    -

    If you want to read Istorija srpske knjizevnosti by Jovan Deretic, you can download it as a PDF file from various online sources. However, you should be aware that some of these sources may not be reliable or legal. Therefore, it is advisable to check the quality and legitimacy of the PDF file before downloading it. Alternatively, you can buy a printed copy of the book from reputable bookstores or online platforms.

    -

    Modern Serbian literature emerged in the 19th century, as part of the national awakening and cultural emancipation of the Serbian people from Ottoman rule. The first modern Serbian writers adopted the language and folklore of the common people, as reformed and standardized by Vuk Stefanović Karadžić, a linguist and folklorist who collected and published thousands of oral poems, songs, stories, proverbs, and riddles. Karadžić's work inspired many poets, such as Petar II Petrović Njegoš, Branko Radičević, Jovan Jovanović Zmaj, and Đura Jakšić, who expressed their patriotic, romantic, and lyrical sentiments in verse. Some of them also wrote prose works, such as Njegoš's philosophical epic The Mountain Wreath (1847) and Jakšić's realistic stories.

    -

    In the late 19th and early 20th centuries, Serbian literature was influenced by various European movements, such as realism, naturalism, symbolism, modernism, and expressionism. Some of the most prominent writers of this period were Svetozar Marković, a social critic and political activist; Laza Lazarević, a psychiatrist and master of psychological short stories; Svetozar Ćorović, a prolific novelist and chronicler of Bosnian life; Janko Veselinović, a writer of rural novels and stories; Simo Matavulj, a traveler and cosmopolitan storyteller; Milovan Glišić, a humorist and translator; Branislav Nušić, a playwright and satirist; Radoje Domanović, a writer of political allegories; Borisav Stanković, a regionalist and modernist who depicted the decay of old patriarchal values; Isidora Sekulić, a feminist and essayist; Miloš Crnjanski, a poet and novelist who explored the themes of exile and identity; Ivo Andrić, a Nobel laureate who wrote historical novels and stories about Bosnia; Meša Selimović, another Bosnian writer who examined the moral dilemmas of his time; Danilo Kiš, a postmodernist who experimented with form and language; Milorad Pavić, a writer of unconventional and playful novels that challenged the conventional notions of literature; and many others.

    -

    -

    Contemporary Serbian literature reflects the diversity and complexity of the Serbian society and culture in the post-communist and post-war era. Some of the current trends include postmodernism, postcolonialism, feminism, queer theory, ecocriticism, trauma studies, memory studies, and transnationalism. Some of the notable writers of this period are David Albahari, a Jewish-Serbian writer who immigrated to Canada; Svetlana Velmar-Janković, a historian and novelist who wrote about Belgrade's past and present; Goran Petrović, a magical realist who blended fantasy and reality in his novels; Vladimir Arsenijević, a writer of the so-called lost generation who depicted the urban youth culture in the 1990s; Uglješa Šajtinac, a playwright and novelist who explored the absurdity and violence of contemporary society; Biljana Srbljanović, a dramatist who criticized the political and social situation in Serbia; Dubravka Ugrešić, a Croatian-Serbian writer who lives in exile in Amsterdam; Aleksandar Hemon, a Bosnian-Serbian writer who lives in Chicago; Jelena Lengold, a poet and short story writer who won the European Union Prize for Literature in 2011; Svetislav Basara, a novelist and essayist who experiments with various genres and styles; Vladimir Pištalo, a historian and novelist who lives in Boston; Dragan Velikić, a novelist and essayist who writes about history, memory, identity, and exile; and many others.

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/modules.py b/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = 
self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - 
kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, 
g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/sunshineatnoon/TextureScraping/libs/transformer_cluster.py b/spaces/sunshineatnoon/TextureScraping/libs/transformer_cluster.py deleted file mode 100644 index 9789418c44c37d2f77bb0b6ff4fee20237a66ba0..0000000000000000000000000000000000000000 --- a/spaces/sunshineatnoon/TextureScraping/libs/transformer_cluster.py +++ /dev/null @@ -1,219 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class PositionEmbs(nn.Module): - def __init__(self, num_patches, emb_dim, dropout_rate=0.1): - super(PositionEmbs, self).__init__() - self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, emb_dim)) - if dropout_rate > 0: - self.dropout = nn.Dropout(dropout_rate) - else: - self.dropout = None - - def forward(self, x): - out = x + self.pos_embedding - - if self.dropout: - out = self.dropout(out) - - return out - - -class MlpBlock(nn.Module): - """ Transformer Feed-Forward Block """ - def __init__(self, in_dim, mlp_dim, out_dim, dropout_rate=0.1): - super(MlpBlock, self).__init__() - - # init layers - self.fc1 = nn.Linear(in_dim, mlp_dim) - self.fc2 = nn.Linear(mlp_dim, out_dim) - self.act = nn.GELU() - if dropout_rate > 0.0: - self.dropout1 = nn.Dropout(dropout_rate) - self.dropout2 = nn.Dropout(dropout_rate) - else: - self.dropout1 = None - self.dropout2 = None - - def forward(self, x): - - out = self.fc1(x) - out = self.act(out) - if self.dropout1: - out = self.dropout1(out) - - out = self.fc2(out) - if 
self.dropout2: - out = self.dropout2(out) - return out - - -class LinearGeneral(nn.Module): - def __init__(self, in_dim=(768,), feat_dim=(12, 64)): - super(LinearGeneral, self).__init__() - - self.weight = nn.Parameter(torch.randn(*in_dim, *feat_dim)) - self.bias = nn.Parameter(torch.zeros(*feat_dim)) - - def forward(self, x, dims): - a = torch.tensordot(x, self.weight, dims=dims) + self.bias - return a - - -class SelfAttention(nn.Module): - def __init__(self, in_dim, heads=8, dropout_rate=0.1): - super(SelfAttention, self).__init__() - self.heads = heads - self.head_dim = in_dim // heads - self.scale = self.head_dim ** 0.5 - - self.query = LinearGeneral((in_dim,), (self.heads, self.head_dim)) - self.key = LinearGeneral((in_dim,), (self.heads, self.head_dim)) - self.value = LinearGeneral((in_dim,), (self.heads, self.head_dim)) - self.out = LinearGeneral((self.heads, self.head_dim), (in_dim,)) - - if dropout_rate > 0: - self.dropout = nn.Dropout(dropout_rate) - else: - self.dropout = None - - self.cluster_mlp = nn.Sequential(nn.Linear(256 * 100, 64 * 100), - nn.LeakyReLU(0.2), - nn.Linear(64 * 100, 8 * 100)) - - def forward(self, x): - b, n, _ = x.shape - - q = self.query(x, dims=([2], [0])) - q = self.cluster_mlp(q.view(b, -1)).view(b, 8, 1, 100) - k = self.key(x, dims=([2], [0])) - v = self.value(x, dims=([2], [0])) - - q = q.permute(0, 2, 1, 3) - k = k.permute(0, 2, 1, 3) - v = v.permute(0, 2, 1, 3) - - attn_weights = torch.matmul(q, k.transpose(-2, -1)) / self.scale - attn_weights = F.softmax(attn_weights, dim=-1) - out = torch.matmul(attn_weights, v) - out = out.permute(0, 2, 1, 3) - - out = self.out(out, dims=([2, 3], [0, 1])) - - return out - - -class EncoderBlock(nn.Module): - def __init__(self, in_dim, mlp_dim, num_heads, dropout_rate=0.1, attn_dropout_rate=0.1): - super(EncoderBlock, self).__init__() - - self.norm1 = nn.LayerNorm(in_dim) - self.attn = SelfAttention(in_dim, heads=num_heads, dropout_rate=attn_dropout_rate) - if dropout_rate > 0: - self.dropout = nn.Dropout(dropout_rate) - else: - self.dropout = None - self.norm2 = nn.LayerNorm(in_dim) - self.mlp = MlpBlock(in_dim, mlp_dim, in_dim, dropout_rate) - - def forward(self, x): - residual = x - out = self.norm1(x) - out = self.attn(out) - if self.dropout: - out = self.dropout(out) - #out += residual - residual = out - - out = self.norm2(out) - out = self.mlp(out) - out += residual - return out - - -class Encoder(nn.Module): - def __init__(self, num_patches, emb_dim, mlp_dim, num_layers=12, num_heads=12, dropout_rate=0.1, attn_dropout_rate=0.0): - super(Encoder, self).__init__() - - # positional embedding - self.pos_embedding = PositionEmbs(num_patches, emb_dim, dropout_rate) - - # encoder blocks - in_dim = emb_dim - self.encoder_layers = nn.ModuleList() - for i in range(num_layers): - layer = EncoderBlock(in_dim, mlp_dim, num_heads, dropout_rate, attn_dropout_rate) - self.encoder_layers.append(layer) - self.norm = nn.LayerNorm(in_dim) - - def forward(self, x): - - out = self.pos_embedding(x) - - for layer in self.encoder_layers: - out = layer(out) - - out = self.norm(out) - return out - -class VisionTransformer(nn.Module): - """ Vision Transformer """ - def __init__(self, - image_size=(256, 256), - patch_size=(16, 16), - emb_dim=768, - mlp_dim=3072, - num_heads=12, - num_layers=12, - num_classes=1000, - attn_dropout_rate=0.0, - dropout_rate=0.1, - feat_dim=None): - super(VisionTransformer, self).__init__() - h, w = image_size - - # embedding layer - fh, fw = patch_size - gh, gw = h // fh, w // fw - num_patches = gh * gw - 
self.embedding = nn.Conv2d(3, emb_dim, kernel_size=(fh, fw), stride=(fh, fw)) - # class token - self.cls_token = nn.Parameter(torch.zeros(1, 1, emb_dim)) - - # transformer - self.transformer = Encoder( - num_patches=num_patches, - emb_dim=emb_dim, - mlp_dim=mlp_dim, - num_layers=num_layers, - num_heads=num_heads, - dropout_rate=dropout_rate, - attn_dropout_rate=attn_dropout_rate) - - # classfier - self.classifier = nn.Linear(emb_dim, num_classes) - - def forward(self, x): - emb = self.embedding(x) # (n, c, gh, gw) - emb = emb.permute(0, 2, 3, 1) # (n, gh, hw, c) - b, h, w, c = emb.shape - emb = emb.reshape(b, h * w, c) - - # prepend class token - cls_token = self.cls_token.repeat(b, 1, 1) - emb = torch.cat([cls_token, emb], dim=1) - - # transformer - feat = self.transformer(emb) - - # classifier - logits = self.classifier(feat[:, 0]) - return logits - -if __name__ == '__main__': - model = VisionTransformer(num_layers=2) - import pdb; pdb.set_trace() - x = torch.randn((2, 3, 256, 256)) - out = model(x) \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Casino Royale Dual Audio Eng-hindi 720p Movies.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Casino Royale Dual Audio Eng-hindi 720p Movies.md deleted file mode 100644 index c047cde03e3a44221903791d9dd4696c39a83c89..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Casino Royale Dual Audio Eng-hindi 720p Movies.md +++ /dev/null @@ -1,8 +0,0 @@ -
    -

    Now you can get a 17 GB release in as little as 1.5 GB! You can download Casino Royale (2006), the James Bond movie, as a full dual-audio (Hindi-English) release in 1080p and 2160p from the link above, or in 720p, 1080p and 2160p from the link below.

    -

    With the popularization of Michael Mann's thriller Heat in 1995, the genre flourished, with a new wave of actors like Matt Damon and Robert De Niro adding their names to the list of stars who have called Las Vegas home in recent years. By 2006, the genre had become a regular part of the awards season, with films such as Rounders, The Italian Job, and the aforementioned Casino Royale all setting the stage for the films of 2007.

    casino royale (2006) hindi dubbed [warner brother] full movie review

    -

    casino royale dual audio eng-hindi 720p movies


    DOWNLOAD ››› https://cinurl.com/2uEYry



    -

    the film tells the story of retired british intelligence agent james bond (played by daniel craig) who targets vesper lynd (eva green), an mi6 trainee and bond's on-and-off lover, who is in possession of a priceless statue from a secret egyptian tomb with information leading to a terrorist organization that is planning to release a deadly virus in las vegas. as he prepares to foil their plans and track down lynd, bond faces off against a greedy triad leader who wants the artifact for his own black magic purposes.

    -

    while the first film in the series was a dull disappointment, the overall quality of bond movies in the past decade or so has been a marked improvement. by and large, the producers have steered clear of turning their films into long-winded action-adventure epics of the scorsese/schumacher sort (although the 1999 casino royale has its own problems in this area). rather than attempting to do big-budget bond up in the ways of the bourne series and the matrix series, the producers have opted for smaller-scale films that adhere to the formula established by the old bond films and casino royale is no exception.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Kvisoft Flipbook Maker Pro 4 Full [BETTER] Crackl.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Kvisoft Flipbook Maker Pro 4 Full [BETTER] Crackl.md deleted file mode 100644 index ece645cee3287b1673d7548782d1314a8cd99ca9..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Kvisoft Flipbook Maker Pro 4 Full [BETTER] Crackl.md +++ /dev/null @@ -1,32 +0,0 @@ - -

    Kvisoft Flipbook Maker Pro 4: A Powerful Tool for Creating Interactive Digital Publications

    -

    Kvisoft Flipbook Maker Pro 4 is a software that allows you to convert your PDF, Word, Excel, PowerPoint and other files into stunning HTML5 and Flash-based flipbooks. You can create magazines, catalogs, e-books, brochures and more with realistic page-flipping animation and rich media elements. You can also customize your flipbooks with various templates, themes, backgrounds, effects and settings.

    -

    With Kvisoft Flipbook Maker Pro 4, you can:

    -

    Kvisoft Flipbook Maker Pro 4 Full Crackl


    Download File · https://cinurl.com/2uEXOd



    -
      -
    • Make flipbooks for all screens and devices, including desktops, laptops, tablets and smartphones.
    • -
    • Make multilingual flipbooks with support for different languages and fonts.
    • -
    • Add images, videos, audio, links, buttons, shapes and other interactive elements to your flipbooks.
    • -
    • Embed your flipbooks into your website or blog, or share them via email, social media or QR code.
    • -
    • Publish your flipbooks online or offline in various formats, such as HTML, ZIP, EXE, APP, etc.
    • -
    • Manage your flipbooks with a built-in bookcase that links relevant information.
    • -
    • Preview your flipbooks in real-time and zoom in or out for a better reading experience.
    • -
    -

    Kvisoft Flipbook Maker Pro 4 is easy to use and has a user-friendly interface. You can import your files with a few clicks and edit them with drag-and-drop tools. You can also choose from a wide range of pre-designed templates or create your own from scratch. You can preview your flipbooks before publishing them and make any changes as needed.

    -


    - -

    How to Use Kvisoft Flipbook Maker Pro 4

    -

    Kvisoft Flipbook Maker Pro 4 is easy to use and has a user-friendly interface. You can follow these simple steps to create your own flipbooks:

    -
      -
    1. Download and install Kvisoft Flipbook Maker Pro 4 from the official website or use the full crack version (not recommended).
    2. -
    3. Launch the software and click on "Create New" to start a new project.
    4. -
    5. Choose the file type you want to import, such as PDF, Word, Excel, PowerPoint, etc. You can also import images, SWF, flash videos and other files.
    6. -
    7. Select the file you want to import and click on "Open". You can also drag and drop the file into the software.
    8. -
    9. Edit your flipbook with various tools and options. You can add pages, delete pages, crop pages, rotate pages, etc. You can also add images, videos, audio, links, buttons, shapes and other interactive elements to your flipbook. You can customize your flipbook with various templates, themes, backgrounds, effects and settings.
    10. -
    11. Preview your flipbook in real-time and zoom in or out for a better reading experience. You can also check the page-flipping animation and the media elements.
    12. -
    13. Publish your flipbook online or offline in various formats, such as HTML, ZIP, EXE, APP, etc. You can also embed your flipbook into your website or blog, or share it via email, social media or QR code.
    14. -
    15. Manage your flipbooks with a built-in bookcase that links relevant information.
    16. -
    -

    Kvisoft Flipbook Maker Pro 4 is a powerful tool for creating interactive digital publications that can engage your audience and boost your business. You can download it from the official website or use the full crack version to unlock all the features. However, we do not recommend using the crack version as it may contain viruses or malware that can harm your computer. It is better to purchase the software from the official website and enjoy its benefits legally.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fpn_uniformer.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fpn_uniformer.py deleted file mode 100644 index 8aae98c5991055bfcc08e82ccdc09f8b1d9f8a8d..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fpn_uniformer.py +++ /dev/null @@ -1,35 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - mlp_ratio=4., - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1), - neck=dict( - type='FPN', - in_channels=[64, 128, 320, 512], - out_channels=256, - num_outs=4), - decode_head=dict( - type='FPNHead', - in_channels=[256, 256, 256, 256], - in_index=[0, 1, 2, 3], - feature_strides=[4, 8, 16, 32], - channels=128, - dropout_ratio=0.1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole') -) diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py deleted file mode 100644 index b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import pickle - -from .base import BaseFileHandler - - -class PickleHandler(BaseFileHandler): - - str_like = False - - def load_from_fileobj(self, file, **kwargs): - return pickle.load(file, **kwargs) - - def load_from_path(self, filepath, **kwargs): - return super(PickleHandler, self).load_from_path( - filepath, mode='rb', **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault('protocol', 2) - return pickle.dumps(obj, **kwargs) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault('protocol', 2) - pickle.dump(obj, file, **kwargs) - - def dump_to_path(self, obj, filepath, **kwargs): - super(PickleHandler, self).dump_to_path( - obj, filepath, mode='wb', **kwargs) diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/prepare_s2t_data_manifest.py b/spaces/szukevin/VISOR-GPT/train/scripts/prepare_s2t_data_manifest.py deleted file mode 100644 index 3d6f891e5be192d8f6f815df7bea8eb9c11bb794..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/prepare_s2t_data_manifest.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 - -""" -Helper script to generate speech2text dataset -""" - -import argparse -import os -import glob -import random - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("root") - parser.add_argument("--output-file", required=True) - parser.add_argument("--for_finetune", action='store_true') - parser.add_argument("--sample-ratio", type=float, default=1.0) - parser.add_argument("--ext", default="flac") - parser.add_argument("--seed", type=int, default=7) - args = parser.parse_args() - - rand = random.Random(args.seed) - os.makedirs(os.path.dirname(os.path.realpath(args.output_file)), exist_ok=True) - - dir_path = os.path.realpath(args.root) - search_path = os.path.join(dir_path, "**/*." + args.ext) - - transcriptions = {} - - with open(args.output_file, "w") as out_file: - if args.for_finetune: - print("text" + "\t" + "wav_path", file=out_file) - for fname in glob.iglob(search_path, recursive=True): - if rand.random() > args.sample_ratio: - continue - file_path = os.path.realpath(fname) - - dir = os.path.dirname(file_path) - if dir not in transcriptions: - parts = dir.split(os.path.sep) - trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt" - path = os.path.join(args.root, dir, trans_path) - assert os.path.exists(path) - texts = {} - with open(path, "r") as trans_f: - for tline in trans_f: - items = tline.strip().split() - texts[items[0]] = " ".join(items[1:]).lower() - transcriptions[dir] = texts - part = os.path.basename(file_path).split(".")[0] - assert part in transcriptions[dir] - print( - transcriptions[dir][part] + "\t" + file_path, - file=out_file - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/taesiri/ChatGPT-ImageCaptioner/detic/data/custom_dataset_dataloader.py b/spaces/taesiri/ChatGPT-ImageCaptioner/detic/data/custom_dataset_dataloader.py deleted file mode 100644 index 8f8d6817704026796d2c2f457fe2624800693267..0000000000000000000000000000000000000000 --- a/spaces/taesiri/ChatGPT-ImageCaptioner/detic/data/custom_dataset_dataloader.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/multi_dataset_dataloader.py (Apache-2.0 License) -import copy -import logging -import numpy as np -import operator -import torch -import torch.utils.data -import json -from detectron2.utils.comm import get_world_size -from detectron2.utils.logger import _log_api_usage, log_first_n - -from detectron2.config import configurable -from detectron2.data import samplers -from torch.utils.data.sampler import BatchSampler, Sampler -from detectron2.data.common import DatasetFromList, MapDataset -from detectron2.data.dataset_mapper import DatasetMapper -from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader -from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler -from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram -from detectron2.data.build import filter_images_with_only_crowd_annotations -from detectron2.data.build import filter_images_with_few_keypoints -from detectron2.data.build import check_metadata_consistency -from detectron2.data.catalog import MetadataCatalog, DatasetCatalog -from detectron2.utils import comm -import itertools -import math -from collections import defaultdict -from typing import Optional - - -def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): - sampler_name = cfg.DATALOADER.SAMPLER_TRAIN - if 'MultiDataset' in sampler_name: - dataset_dicts = get_detection_dataset_dicts_with_source( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - else: - dataset_dicts = get_detection_dataset_dicts( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - - if mapper is None: - mapper = DatasetMapper(cfg, True) - - if sampler is not None: - pass - elif sampler_name == "TrainingSampler": - sampler = TrainingSampler(len(dataset)) - elif sampler_name == "MultiDatasetSampler": - sampler = MultiDatasetSampler( - dataset_dicts, - dataset_ratio = cfg.DATALOADER.DATASET_RATIO, - use_rfs = cfg.DATALOADER.USE_RFS, - dataset_ann = cfg.DATALOADER.DATASET_ANN, - repeat_threshold = cfg.DATALOADER.REPEAT_THRESHOLD, - ) - elif sampler_name == "RepeatFactorTrainingSampler": - repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( - dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD - ) - sampler = RepeatFactorTrainingSampler(repeat_factors) - else: - raise ValueError("Unknown training sampler: {}".format(sampler_name)) - - return { - "dataset": dataset_dicts, - "sampler": sampler, - "mapper": mapper, - "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, - "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, - "num_workers": cfg.DATALOADER.NUM_WORKERS, - 'multi_dataset_grouping': cfg.DATALOADER.MULTI_DATASET_GROUPING, - 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE, - 'dataset_bs': cfg.DATALOADER.DATASET_BS, - 'num_datasets': len(cfg.DATASETS.TRAIN) - } - - -@configurable(from_config=_custom_train_loader_from_config) -def build_custom_train_loader( - dataset, *, mapper, sampler, - total_batch_size=16, 
- aspect_ratio_grouping=True, - num_workers=0, - num_datasets=1, - multi_dataset_grouping=False, - use_diff_bs_size=False, - dataset_bs=[] - ): - """ - Modified from detectron2.data.build.build_custom_train_loader, but supports - different samplers - """ - if isinstance(dataset, list): - dataset = DatasetFromList(dataset, copy=False) - if mapper is not None: - dataset = MapDataset(dataset, mapper) - if sampler is None: - sampler = TrainingSampler(len(dataset)) - assert isinstance(sampler, torch.utils.data.sampler.Sampler) - if multi_dataset_grouping: - return build_multi_dataset_batch_data_loader( - use_diff_bs_size, - dataset_bs, - dataset, - sampler, - total_batch_size, - num_datasets=num_datasets, - num_workers=num_workers, - ) - else: - return build_batch_data_loader( - dataset, - sampler, - total_batch_size, - aspect_ratio_grouping=aspect_ratio_grouping, - num_workers=num_workers, - ) - - -def build_multi_dataset_batch_data_loader( - use_diff_bs_size, dataset_bs, - dataset, sampler, total_batch_size, num_datasets, num_workers=0 -): - """ - """ - world_size = get_world_size() - assert ( - total_batch_size > 0 and total_batch_size % world_size == 0 - ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( - total_batch_size, world_size - ) - - batch_size = total_batch_size // world_size - data_loader = torch.utils.data.DataLoader( - dataset, - sampler=sampler, - num_workers=num_workers, - batch_sampler=None, - collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements - worker_init_fn=worker_init_reset_seed, - ) # yield individual mapped dict - if use_diff_bs_size: - return DIFFMDAspectRatioGroupedDataset( - data_loader, dataset_bs, num_datasets) - else: - return MDAspectRatioGroupedDataset( - data_loader, batch_size, num_datasets) - - -def get_detection_dataset_dicts_with_source( - dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None -): - assert len(dataset_names) - dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names] - for dataset_name, dicts in zip(dataset_names, dataset_dicts): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - - for source_id, (dataset_name, dicts) in \ - enumerate(zip(dataset_names, dataset_dicts)): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - for d in dicts: - d['dataset_source'] = source_id - - if "annotations" in dicts[0]: - try: - class_names = MetadataCatalog.get(dataset_name).thing_classes - check_metadata_consistency("thing_classes", dataset_name) - print_instances_class_histogram(dicts, class_names) - except AttributeError: # class names are not available for this dataset - pass - - assert proposal_files is None - - dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) - - has_instances = "annotations" in dataset_dicts[0] - if filter_empty and has_instances: - dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) - if min_keypoints > 0 and has_instances: - dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) - - return dataset_dicts - - -class MultiDatasetSampler(Sampler): - def __init__( - self, - dataset_dicts, - dataset_ratio, - use_rfs, - dataset_ann, - repeat_threshold=0.001, - seed: Optional[int] = None, - ): - """ - """ - sizes = [0 for _ in range(len(dataset_ratio))] - for d in dataset_dicts: - sizes[d['dataset_source']] += 1 - print('dataset sizes', sizes) - self.sizes = sizes - assert len(dataset_ratio) == len(sizes), \ - 'length of dataset ratio {} 
should be equal to number if dataset {}'.format( - len(dataset_ratio), len(sizes) - ) - if seed is None: - seed = comm.shared_random_seed() - self._seed = int(seed) - self._rank = comm.get_rank() - self._world_size = comm.get_world_size() - - self.dataset_ids = torch.tensor( - [d['dataset_source'] for d in dataset_dicts], dtype=torch.long) - - dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \ - for i, (r, s) in enumerate(zip(dataset_ratio, sizes))] - dataset_weight = torch.cat(dataset_weight) - - rfs_factors = [] - st = 0 - for i, s in enumerate(sizes): - if use_rfs[i]: - if dataset_ann[i] == 'box': - rfs_func = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency - else: - rfs_func = repeat_factors_from_tag_frequency - rfs_factor = rfs_func( - dataset_dicts[st: st + s], - repeat_thresh=repeat_threshold) - rfs_factor = rfs_factor * (s / rfs_factor.sum()) - else: - rfs_factor = torch.ones(s) - rfs_factors.append(rfs_factor) - st = st + s - rfs_factors = torch.cat(rfs_factors) - - self.weights = dataset_weight * rfs_factors - self.sample_epoch_size = len(self.weights) - - def __iter__(self): - start = self._rank - yield from itertools.islice( - self._infinite_indices(), start, None, self._world_size) - - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self._seed) - while True: - ids = torch.multinomial( - self.weights, self.sample_epoch_size, generator=g, - replacement=True) - nums = [(self.dataset_ids[ids] == i).sum().int().item() \ - for i in range(len(self.sizes))] - yield from ids - - -class MDAspectRatioGroupedDataset(torch.utils.data.IterableDataset): - def __init__(self, dataset, batch_size, num_datasets): - """ - """ - self.dataset = dataset - self.batch_size = batch_size - self._buckets = [[] for _ in range(2 * num_datasets)] - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - aspect_ratio_bucket_id = 0 if w > h else 1 - bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_size: - yield bucket[:] - del bucket[:] - - -class DIFFMDAspectRatioGroupedDataset(torch.utils.data.IterableDataset): - def __init__(self, dataset, batch_sizes, num_datasets): - """ - """ - self.dataset = dataset - self.batch_sizes = batch_sizes - self._buckets = [[] for _ in range(2 * num_datasets)] - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - aspect_ratio_bucket_id = 0 if w > h else 1 - bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_sizes[d['dataset_source']]: - yield bucket[:] - del bucket[:] - - -def repeat_factors_from_tag_frequency(dataset_dicts, repeat_thresh): - """ - """ - category_freq = defaultdict(int) - for dataset_dict in dataset_dicts: - cat_ids = dataset_dict['pos_category_ids'] - for cat_id in cat_ids: - category_freq[cat_id] += 1 - num_images = len(dataset_dicts) - for k, v in category_freq.items(): - category_freq[k] = v / num_images - - category_rep = { - cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) - for cat_id, cat_freq in category_freq.items() - } - - rep_factors = [] - for dataset_dict in dataset_dicts: - cat_ids = dataset_dict['pos_category_ids'] - rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0) - rep_factors.append(rep_factor) - - return torch.tensor(rep_factors, dtype=torch.float32) \ No newline at end of file diff --git 
a/spaces/terfces0erbo/CollegeProjectV2/Downloadkitabfadhilahamalpdf.md b/spaces/terfces0erbo/CollegeProjectV2/Downloadkitabfadhilahamalpdf.md deleted file mode 100644 index f1a352931917cc659bdc40708feca3eaa261ffc0..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Downloadkitabfadhilahamalpdf.md +++ /dev/null @@ -1,84 +0,0 @@ -

    downloadkitabfadhilahamalpdf


    Download 🗸 https://bytlly.com/2uGjc3



    - -[OK]( - -# Review the script - -## Requirement - -- [ ] Knowledge on creating functions and classes in Python - -- [ ] Knowledge on terminal - -- [ ] [Easy instructions]( - -- [ ] [Easy installation]( - -## Set up a Python virtualenv, pip - -``` - -cd ~ - -git clone - -cd python-scripts - -virtualenv -p python3 venv - -source venv/bin/activate - -pip install -U pip - -## Open the source code file - -vim fadhilah.py - -## Modify the script code - -# Import required packages - -import tkinter as tk - -import random - -# Create the main window - -def main(): - - root = tk.Tk() - - root.geometry('400x200') - - # Create the menu bar - - menu = tk.Menu(root) - - fileMenu = tk.Menu(menu, tearoff=0) - - # Add an item to the menu - - fileMenu.add_command(label='Quit', command=root.quit) - - # Add items for options - - fileMenu.add_cascade(label='Read', menu=fileMenu) - - fileMenu.add_cascade(label='Random', menu=fileMenu) - - # The command option - - root.config(menu=menu) - - # Create a frame to hold all buttons - - frame = tk.Frame(root) - - frame.pack() - - # Create a class object to hold the two items in the frame - - button1 = tk.Button(frame, text="Qu 4fefd39f24
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Half Life Blue Shift Download Softonic Software.md b/spaces/terfces0erbo/CollegeProjectV2/Half Life Blue Shift Download Softonic Software.md deleted file mode 100644 index dfc6d3fc007ee0386704399c6201ebaa9f513032..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Half Life Blue Shift Download Softonic Software.md +++ /dev/null @@ -1,6 +0,0 @@ -

    half life blue shift download softonic software


    DOWNLOAD 🔗 https://bytlly.com/2uGkqU



    -
    -Adobe acrobat free download for windows 10 ... bridge mobile graphics chipset driver windows 10 · Internet download manager softonic descargar ... de windows en mi pc · Half life blue shift setup · How to download paid app for free on ios 7 ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Hindi Movie Ready Full Movie With English Subtitles [UPDATED].md b/spaces/terfces0erbo/CollegeProjectV2/Hindi Movie Ready Full Movie With English Subtitles [UPDATED].md deleted file mode 100644 index 730e4dddc44da0e81831def963ff55f9f1d897bc..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Hindi Movie Ready Full Movie With English Subtitles [UPDATED].md +++ /dev/null @@ -1,9 +0,0 @@ - -

    jack reynor as jimmy, left, and myles truitt as eli, right, in the film kin. (alan markfield). in french with english subtitles (1:41) pg. lakshmi directed by: nagesh kukunoor india/104 minutes/hindi with english subtitles trailer:. public film by indie meme.org.

    -

    watching films and series improves our english because:it is fun! it is widely agreed among educators that learning happens more easily when the learner is having fun. watching videos is a passive activity, but you dont realize that your brain is absorbing information. the combination of video, audio and even subtitles stimulates all our learning sensors at the same time, but we dont even realize it because we are absorbed in the film!

    -

    hindi movie ready full movie with english subtitles


    Downloadhttps://bytlly.com/2uGjx0



    -

    720px watch lifemark online (2022) full movies free hd google drive!! lifemark (2022)with english subtitles ready for download, lifemark (2022) 720p, 1080p,. ready (full movie) salman khan, asin anees bazmee, pritam, dsp bhushan kumar,bodyguard 2011 with english subtitle - action, romantic movie salman khan.

    -

    don't be fooled by the lowly rating of the film. the movie is quite funny, despite the poor rating. the bad reviews have thrown the box office numbers out of whack, which doesn't make much sense when you've got a pretty good movie with the right set of stars. hindi movies have a reputation of low budgets, but this one tries for something different. it adds a nice dose of humour and has some interesting characters, especially the dogs.

    -

    download movies in mp3 stream #tamil_hindi_movies. watch movies online stream for free mp3 free mp4 and hdmovies. watch hindi, tamil, hindi movies in hindi, tamil, indian movies in hindi, tamil, indian in mp3 and mp4.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/thak123/Whisper-Konkani/README.md b/spaces/thak123/Whisper-Konkani/README.md deleted file mode 100644 index 9d359635bb96ecc73b0e4e6a12d1a1ee565fdbb1..0000000000000000000000000000000000000000 --- a/spaces/thak123/Whisper-Konkani/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Whisper Konkani -emoji: 📉 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.28.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Encase Free Version NEW.md b/spaces/tialenAdioni/chat-gpt-api/logs/Encase Free Version NEW.md deleted file mode 100644 index 92555bc78b4e30728266f4394244edbf6a0d3b0e..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Encase Free Version NEW.md +++ /dev/null @@ -1,18 +0,0 @@ - -

    How to Use EnCase Free Version for Digital Forensics

    -

EnCase is popular digital forensics software that helps investigators find and analyze digital evidence from various sources, such as computers, mobile devices, cloud applications and more. EnCase is widely used by law enforcement, government agencies, corporations and legal professionals for digital investigations and forensics. However, EnCase is also costly software that requires a license to use. If you are looking for a way to use EnCase for free, you might want to try some of the options below.

    -

    OpenText EnCase Forensic

    -

    OpenText EnCase Forensic is the latest version of EnCase that offers advanced features and capabilities for digital forensics. OpenText EnCase Forensic can process evidence up to 75 percent faster than competing products, support multiple evidence types within a single case, and produce court-accepted reports. OpenText EnCase Forensic also supports comprehensive artifact collection, extensive device support, AI and ML support, and more.

    -

    encase free version


    Download File ✔✔✔ https://urlcod.com/2uK3th



    -

    If you want to use OpenText EnCase Forensic for free, you can request a free trial from the official website. You will need to fill out a form with your contact information and details about your organization and use case. You will then receive an email with instructions on how to download and install the software. The free trial lasts for 30 days and allows you to use all the features of OpenText EnCase Forensic.

    -

    EnParse - 30-Day Free Trial

    -

    EnParse is a tool that works with EnCase to help you find what is in multiple evidence files at once without full export. EnParse can parse various types of data, such as emails, chats, documents, images, videos, etc., and generate useful reports for your clients. EnParse can also filter and sort the data by various criteria, such as date, sender, recipient, keyword, hash, etc.

    -

    If you want to use EnParse for free, you can download a 30-day free trial from the OpenText App Store. You will need to create an account and log in to access the download link. You will also need to have EnCase installed on your computer. The free trial allows you to use all the features of EnParse with no limitations.

    -

    EnCase Endpoint Security

    -

    EnCase Endpoint Security is an endpoint detection and response (EDR) solution that helps security analysts detect, analyze, triage and respond to malicious attacks. EnCase Endpoint Security provides comprehensive endpoint visibility, continuous monitoring, integrated threat intelligence and powerful remediation capabilities.

    -

    If you want to use EnCase Endpoint Security for free, you can request a free demo from the official website. You will need to fill out a form with your contact information and details about your organization and security challenges. You will then be contacted by an OpenText representative who will schedule a live demo with you. The free demo will show you how EnCase Endpoint Security works and how it can benefit your security operations.

    -

    Conclusion

    -

    EnCase is a powerful digital forensics software that can help you find and analyze digital evidence from various sources. However, EnCase is also a costly software that requires a license to use. If you are looking for a way to use EnCase for free, you can try some of the options above, such as requesting a free trial of OpenText EnCase Forensic or EnParse, or requesting a free demo of EnCase Endpoint Security. These options will allow you to use some of the features and capabilities of EnCase for a limited time without paying anything.

    -

    ddb901b051
    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download of Code Composer Studio v3.3 - An IDE for TIs Microcontrollers and Processors.md b/spaces/tialenAdioni/chat-gpt-api/logs/Free Download of Code Composer Studio v3.3 - An IDE for TIs Microcontrollers and Processors.md deleted file mode 100644 index f74467442c49932e9347e88b18de6ccfea5bf5d7..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download of Code Composer Studio v3.3 - An IDE for TIs Microcontrollers and Processors.md +++ /dev/null @@ -1,130 +0,0 @@ -
    -

    Darksiders 2: Deathinitive Edition (Patch 2.0.1.3) (GOG): A Review of the Latest Update

    - -

    Darksiders 2: Deathinitive Edition is a remastered version of the original Darksiders 2, a hack and slash action-adventure game that follows the story of Death, one of the four horsemen of the Apocalypse. The game was released in 2015 by THQ Nordic and Gunfire Games, and it includes all the DLCs and improvements that were made for the original game. The game is available on various platforms, including GOG, a digital distribution platform that offers DRM-free games.

    -

    Darksiders 2: Deathinitive Edition (Patch 2.0.1.3) (GOG)


    DOWNLOAD ✏ ✏ ✏ https://urlcod.com/2uK3ZN



    - -

    In this article, we will review the latest update for Darksiders 2: Deathinitive Edition on GOG, which is patch 2.0.1.3. We will look at what the patch does, how to download and install it, and what are the benefits of playing the game with the patch.

    - -

    What Does Patch 2.0.1.3 Do?

    - -

    Patch 2.0.1.3 is a minor update that fixes some bugs and improves some aspects of the game. According to the patch notes on SteamDB, a website that tracks Steam data, the patch does the following:

    - -
      -
    • Fixes an issue where some users could not start the game due to missing files.
    • -
    • Fixes an issue where some users could not access the Abyssal Forge DLC due to a corrupted save file.
    • -
    • Fixes an issue where some users could not see their achievements on GOG Galaxy.
    • -
    • Fixes an issue where some users could not use their controller on GOG Galaxy.
    • -
    • Improves the performance and stability of the game.
    • -
    - -

    The patch does not add any new content or features to the game, but it makes the game more playable and enjoyable for GOG users.

    - -

    How to Download and Install Patch 2.0.1.3?

    - -

    If you have Darksiders 2: Deathinitive Edition on GOG, you can download and install patch 2.0.1.3 easily and automatically through GOG Galaxy, the client software for GOG games. Here are the steps to follow:

    - -
      -
    1. Launch GOG Galaxy and log in to your account.
    2. -
    3. Select Darksiders 2: Deathinitive Edition from your library.
    4. -
    5. Click on the "More" button next to the "Play" button.
    6. -
    7. Select "Manage installation" and then "Verify / Repair".
    8. -
    9. Wait for GOG Galaxy to check your game files and download any missing or corrupted files.
    10. -
    11. Once the process is done, click on the "Play" button and enjoy the game with patch 2.0.1.3.
    12. -
    - -

    If you do not have GOG Galaxy or prefer to download and install patch 2.0.1.3 manually, you can also do so by following these steps:

    - -
      -
    1. Go to this link and download the patch file (darksiders_ii_deathinitive_edition_patch_20151106_20161219_12187.zip).
    2. -
    3. Extract the zip file and locate the setup file (setup_darksiders_ii_deathinitive_edition_2.0.1.3.exe).
    4. -
    5. Run the setup file and follow the instructions to install the patch.
    6. -
    7. Make sure you install the patch in the same folder where you have installed Darksiders 2: Deathinitive Edition.
    8. -
    9. Once the installation is done, launch the game and enjoy it with patch 2.0.1.3.
    10. -
    - -

    What Are the Benefits of Playing Darksiders 2: Deathinitive Edition with Patch 2.0.1.3?

    - -

    Playing Darksiders 2: Deathinitive Edition with patch 2.0.1.3 has several benefits for GOG users, such as:

    - -
      -
    • You can start and play the game without any errors or missing files.
    • -
    • You can access and enjoy all the DLCs that are included in the game, such as The Abyssal Forge, The Demon Lord Belial, Death Rides, Angel of Death, Deadly Despair, Shadow of Death, Mortis Pack, Rusanov's Axe, Van Der Schmash Hammer, Fletcher's Crow Hammer, Mace Maximus, Argul's Tomb.
    • -
    • You can track and unlock your achievements on GOG Galaxy.
    • -
    • You can use your controller on GOG Galaxy without any issues.
    • -
• You can experience smoother and more stable gameplay with improved performance.
    • -
    - -

    Darksiders 2: Deathinitive Edition with patch 2.0.1.3 is a great way to enjoy this epic hack and slash adventure that runs parallel to the events in the original Darksiders game.

    - -

    Darksiders 2: Deathinitive Edition (Patch 2.0.1.3) (GOG): Conclusion

    - -

    Darksiders 2: Deathinitive Edition is a remastered version of Darksiders 2, a hack and slash action-adventure game that follows the story of Death, one of the four horsemen of the Apocalypse. The game is available on various platforms, including GOG, a digital distribution platform that offers DRM-free games.

    - -

    Patch 2.0.1.3 is a minor update that fixes some bugs and improves some aspects of the game for GOG users. You can download and install it easily and automatically through GOG Galaxy or manually through a link provided in this article.

    -

    Darksiders II: Deathinitive Edition on GOG.com[^1^]
    -Darksiders 2 Deathinitive Edition Torrent Download[^2^]
    -Darksiders 2 Deathinitive Edition Mods and Community[^3^]
    -Darksiders II: Deathinitive Edition Free GOG PC Game[^4^]
    -Darksiders 2 Deathinitive Edition Hack and Slash Adventure
    -Darksiders II: Deathinitive Edition Redeem War
    -Darksiders 2 Deathinitive Edition All DLCs Included
    -Darksiders II: Deathinitive Edition Improved Graphic Render Engine
    -Darksiders 2 Deathinitive Edition Reworked Game Balancing
    -Darksiders II: Deathinitive Edition Play as Death
    -Darksiders 2 Deathinitive Edition Epic Universe
    -Darksiders II: Deathinitive Edition Heaven and Hell Environments
    -Darksiders 2 Deathinitive Edition Player Choice and Customization
    -Darksiders II: Deathinitive Edition Replay-ability
    -Darksiders 2 Deathinitive Edition Traversal
    -Darksiders II: Deathinitive Edition Joe Mad Style
    -Darksiders 2 Deathinitive Edition Dual-Wield Weapons
    -Darksiders II: Deathinitive Edition Upgradable Armor
    -Darksiders 2 Deathinitive Edition Skill Trees
    -Darksiders II: Deathinitive Edition Mount Despair
    -Darksiders 2 Deathinitive Edition Maker Armor Set
    -Darksiders II: Deathinitive Edition The Abyssal Forge DLC
    -Darksiders 2 Deathinitive Edition The Demon Lord Belial DLC
    -Darksiders II: Deathinitive Edition Death Rides DLC
    -Darksiders 2 Deathinitive Edition Angel of Death DLC
    -Darksiders II: Deathinitive Edition Deadly Despair DLC
    -Darksiders 2 Deathinitive Edition Shadow of Death DLC
    -Darksiders II: Deathinitive Edition Mortis Pack DLC
    -Darksiders 2 Deathinitive Edition Rusanov's Axe DLC
    -Darksiders II: Deathinitive Edition Van Der Schmash Hammer DLC
    -Darksiders 2 Deathinitive Edition Fletcher's Crow Hammer DLC
    -Darksiders II: Deathinitive Edition Mace Maximus DLC
    -Darksiders 2 Deathinitive Edition Argul's Tomb DLC
    -Darksiders II: Deathinitive Edition Native 1080p Resolution
    -Darksiders 2 Deathinitive Edition The White City Location
    -Darksiders II: Deathinitive Edition The Eternal Throne Location
    -Darksiders 2 Deathinitive Edition Lords of the Dead Characters
    -Darksiders II: Deathinitive Edition Huge Iconic Scythe Weapon
    -Darksiders 2 Deathinitive Edition Netherworld Gameworld
    -Darksiders II: Deathinitive Edition Side Quests Options
    -Darksiders 2 Deathinitive Edition Leveling System Feature
    -Darksiders II: Deathinitive Edition Endless Equipment Combinations Feature
    -Darksiders 2 Deathinitive Edition Nimble and Agile Character Traits
    -Darksiders II: Deathinitive Edition Melee and Ranged Attacks Options
    -Darksiders 2 save for deathitive edition of the game[^3^]
    -How to install darksider ii deathitive edition on pc guide
    -Best darksider ii deathitive edition mods for pc
    -How to fix darksider ii deathitive edition bugs and crashes
    -How to download darksider ii deathitive edition torrent file

    - -

Playing Darksiders 2: Deathinitive Edition with patch 2.0.1.3 has several benefits for GOG users, such as starting and playing the game without any errors or missing files, accessing and enjoying all the DLCs that are included in the game, tracking and unlocking your achievements on GOG Galaxy, using your controller on GOG Galaxy without any issues, and experiencing smoother and more stable gameplay with improved performance.

    - -

    Darksiders 2: Deathinitive Edition with patch 2.0.1.3 is a great way to enjoy this epic hack and slash adventure that runs parallel to the events in the original Darksiders game.

    -


    679dcb208e
    -
    -
    \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download 20 Minutes Till DawnPremium for Android - Free Survival Game with Lovecraftian Monsters.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download 20 Minutes Till DawnPremium for Android - Free Survival Game with Lovecraftian Monsters.md deleted file mode 100644 index 844a0b6003e0fe3da81130d179e7a181b1e38bb2..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download 20 Minutes Till DawnPremium for Android - Free Survival Game with Lovecraftian Monsters.md +++ /dev/null @@ -1,94 +0,0 @@ - -

    20 Minutes Till Dawn: A Survival Game That Will Keep You on Your Toes

    -

    Do you love shooting games that challenge your skills and reflexes? Do you enjoy fighting against hordes of monsters from Lovecraftian mythology? Do you want to experience a thrilling and ghoulish adventure that will keep you hooked for hours? If you answered yes to any of these questions, then you should definitely check out 20 Minutes Till Dawn, a premium survival game that is available for Android devices.

    -

    20 minutes till dawn apk free


    DOWNLOADhttps://bltlly.com/2uOo0Q



    -

    Introduction

    -

    What is 20 Minutes Till Dawn?

    -

20 Minutes Till Dawn is a shoot 'em up game that pits you against an endless horde of enemies for 20 minutes. You have to survive until dawn by using various weapons and characters that have unique abilities. The game is developed by Erabit Studios, an indie game studio that specializes in creating fun and engaging games for mobile platforms.

    -

    Why should you play it?

    -

    There are many reasons why you should play 20 Minutes Till Dawn. Here are some of them:

    -
      -
• The game has stunning graphics and sound effects that create an immersive and atmospheric environment.
    • -
    • The game has a simple and intuitive control system that allows you to move, shoot, and use abilities with ease.
    • -
    • The game has a high replay value, as you can try different strategies and combinations of weapons and characters to survive longer.
    • -
    • The game has a leaderboard system that lets you compete with other players around the world and see how you rank among them.
    • -
    • The game has a premium version that gives you access to all the features and content without any ads or in-app purchases.
    • -
    -

    Features of 20 Minutes Till Dawn

    -

    Quickplay Mode

    -

    The game has a quickplay mode that lets you jump into the action right away. You can choose from over 10 different characters, each with their own appearance and abilities. You can also choose from over 20 different weapons, each with their own strengths and weaknesses. You can then start the game and see how long you can survive against the relentless monsters.

    -

    Different Characters

    -

    The game has a variety of characters that you can unlock and use in the game. Each character has a unique ability that can give you an edge in battle. For example, Diamond has high HP and defensive capabilities, Scarlett can burn enemies with a wave of fire, and Raven can summon crows to attack enemies. You can experiment with different characters and see which one suits your playstyle best.

    -

    Weapon Upgrades

    -

    The game has a weapon upgrade system that lets you improve your weapons as you level up. You can choose from different upgrades that enhance your weapons' damage, fire rate, reload speed, accuracy, and more. You can also unlock synergies that give you additional bonuses when you have certain combinations of upgrades. For example, Mini Clip reduces your reload time and increases your damage when you have Fan Fire and Fresh Clip upgrades.

    -

    20 minutes till dawn premium apk download
    -20 minutes till dawn roguelike shooting game
    -20 minutes till dawn android game free
    -20 minutes till dawn full version apk
    -20 minutes till dawn survival game
    -20 minutes till dawn mod apk unlimited money
    -20 minutes till dawn lovecraftian monsters
    -20 minutes till dawn erabit studios
    -20 minutes till dawn apk for android
    -20 minutes till dawn hack apk free
    -20 minutes till dawn shoot em up game
    -20 minutes till dawn unique builds every run
    -20 minutes till dawn apk latest version
    -20 minutes till dawn offline game free
    -20 minutes till dawn gameplay and review
    -20 minutes till dawn apkcombo download
    -20 minutes till dawn mob.org free game
    -20 minutes till dawn apk file download
    -20 minutes till dawn weapons and characters
    -20 minutes till dawn upgrades and synergies
    -20 minutes till dawn sword runes and shield runes
    -20 minutes till dawn apk mirror download
    -20 minutes till dawn apkpure free game
    -20 minutes till dawn apk mod menu
    -20 minutes till dawn cheats and tips
    -20 minutes till dawn discord and twitter
    -20 minutes till dawn facebook and email
    -20 minutes till dawn apk no ads free
    -20 minutes till dawn best character and weapon
    -20 minutes till dawn how to survive the night
    -20 minutes till dawn strategy and guide
    -20 minutes till dawn apk obb download free
    -20 minutes till dawn new update and features
    -20 minutes till dawn bugs and fixes
    -20 minutes till dawn support and feedback
    -20 minutes till dawn wiki and faq
    -20 minutes till dawn trailer and screenshots
    -20 minutes till dawn rating and reviews
    -20 minutes till dawn similar games free
    -20 minutes till dawn google play store link

    -

    Synergies

    -

    The game has a synergy system that lets you access powerful effects when you have certain combinations of upgrades. You can check the synergies at any time by clicking the "II" button in battle. You can then choose the right upgrade tree to unlock outstanding synergies that can boost your performance in the game. For example, Fire and Ice gives you a chance to freeze or burn enemies when you have Frost and Fire upgrades.

    -

    Sword and Shield Runes

    -

    The game has a sword and shield rune system that lets you customize your melee and defensive abilities. You can collect runes from chests and enemies, and equip them on your sword and shield. Each rune has a different effect, such as increasing your damage, healing you, or stunning enemies. You can also combine runes to create more powerful effects. For example, combining a Fire rune and a Shock rune gives you a Flame Shock rune that deals fire and electric damage to enemies.

    -

    How to download 20 Minutes Till Dawn APK for free?

    -

    If you want to play 20 Minutes Till Dawn on your Android device, you can download the APK file for free from a reliable source. Here are the steps to do so:

    -

    Step 1: Find a reliable source

    -

    There are many websites that offer APK files for free, but not all of them are safe and trustworthy. You should look for a source that has positive reviews, ratings, and feedback from other users. You should also check the file size, version, and compatibility of the APK file before downloading it.

    -

    Step 2: Download the APK file

    -

    Once you have found a reliable source, you can click on the download button or link to start downloading the APK file. You may need to allow unknown sources in your device settings to download the file from a third-party source. You should also scan the file with an antivirus software before installing it.

    -

    Step 3: Install the APK file

    -

    After downloading the APK file, you can locate it in your device's file manager or downloads folder. You can then tap on the file to start installing it. You may need to grant some permissions to the app to complete the installation process.

    -

    Step 4: Enjoy the game

    -

    Once the installation is done, you can launch the game from your app drawer or home screen. You can then enjoy playing 20 Minutes Till Dawn on your Android device for free.

    -

    Conclusion

    -

    20 Minutes Till Dawn is a survival game that will test your skills and reflexes as you fight against hordes of monsters for 20 minutes. The game has amazing graphics, sound effects, controls, and features that make it fun and engaging. You can choose from different characters, weapons, upgrades, synergies, and runes to customize your gameplay. You can also compete with other players on the leaderboard and see how you rank among them. If you want to play this game on your Android device, you can download the APK file for free from a reliable source by following the steps mentioned above. So what are you waiting for? Download 20 Minutes Till Dawn APK for free today and enjoy this thrilling and ghoulish adventure.

    -

    FAQs

    -
      -
    • Q: How much does 20 Minutes Till Dawn cost?
    • -
    • A: The game has a premium version that costs $1.99 on Google Play Store. However, you can download the APK file for free from a third-party source.
    • -
    • Q: Is 20 Minutes Till Dawn safe to download?
    • -
    • A: Yes, as long as you download the APK file from a reliable source that has positive reviews and ratings. You should also scan the file with an antivirus software before installing it.
    • -
    • Q: What are the minimum requirements to play 20 Minutes Till Dawn?
    • -
    • A: The game requires Android 4.4 or higher and at least 100 MB of free storage space.
    • -
    • Q: How can I contact the developer of 20 Minutes Till Dawn?
    • -
    • A: You can contact Erabit Studios by sending an email to erabitstudios@gmail.com or visiting their website at https://erabitstudios.com/.
    • -
    • Q: Can I play 20 Minutes Till Dawn offline?
    • -
    • A: Yes, you can play the game offline without any internet connection. However, you will need an internet connection to access the leaderboard and see your ranking.
    • -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/IK Multimedia MODO BASS V1.5.0 Incl Keygen.rar 64 Bitl.md b/spaces/tioseFevbu/cartoon-converter/scripts/IK Multimedia MODO BASS V1.5.0 Incl Keygen.rar 64 Bitl.md deleted file mode 100644 index f8aa59244d4ac4f2db9a536e375818869a0da83d..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/IK Multimedia MODO BASS V1.5.0 Incl Keygen.rar 64 Bitl.md +++ /dev/null @@ -1,120 +0,0 @@ -
    -

    IK Multimedia MODO BASS V1.5.0 Incl Keygen.rar 64 Bitl

    -

    If you are looking for a realistic and versatile bass virtual instrument that can recreate virtually every electric bass sound imaginable, you might want to check out IK Multimedia's MODO BASS. MODO BASS is not your typical sample-based instrument; it is a groundbreaking technology that models the entire process of playing bass using real-time modal synthesis. This means that every component that contributes to the unique tonal properties of a bass player playing an instrument has been modeled and synthesized in real time.

    -

    IK Multimedia MODO BASS V1.5.0 Incl Keygen.rar 64 Bitl


    DOWNLOADhttps://urlcod.com/2uHvDX



    -

    In this article, we will explore what makes MODO BASS so unique and powerful. We will also show you how to install and use MODO BASS on your Windows or Mac OS X computer using a keygen file. Finally, we will give you some tips and tricks on how to get the best out of MODO BASS for your bass tracks.

    -

    How MODO BASS works

    -

    MODO BASS is based on a revolutionary technology called real-time modal synthesis. Unlike traditional virtual instruments that use recorded samples of instruments to produce sound, MODO BASS generates sound by recreating the physical properties of a real instrument. Everything that makes an instrument create sound is physically modeled and sound is synthesized in real time.

    -

This means that MODO BASS can capture the subtle nuances and characteristics of a bass player playing an instrument, such as the force applied to the string, the position of the hand, the type of finger used, the interaction of the string with the frets, the resonance of the body and the electronics of the instrument. MODO BASS also allows you to control these parameters in real time, giving you unprecedented control and expression over your bass sound.
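To make the idea of modal synthesis a little more concrete, here is a minimal, purely illustrative Python sketch of the general technique: a vibrating string is approximated as a bank of damped sine "modes" whose amplitudes and decay rates are set by a simulated pluck. Every number in it (mode count, damping, pluck position) is an assumption chosen for the example; it is only a toy of the underlying principle, not IK Multimedia's engine.

```python
import numpy as np

def modal_pluck(f0=41.2, n_modes=8, pluck_pos=0.2, dur=2.0, sr=44100):
    """Toy modal synthesis: a plucked string as a sum of damped sinusoids.

    f0        -- fundamental frequency in Hz (41.2 Hz is roughly a low E on a 4-string bass)
    n_modes   -- number of harmonic modes to synthesize
    pluck_pos -- plucking position along the string (0..1); it shapes the mode amplitudes
    """
    t = np.arange(int(dur * sr)) / sr
    out = np.zeros_like(t)
    for k in range(1, n_modes + 1):
        freq = k * f0                            # ideal harmonic frequency of mode k
        amp = np.sin(np.pi * k * pluck_pos) / k  # pluck position acts as a comb filter on the modes
        decay = np.exp(-t * (0.5 + 0.8 * k))     # higher modes are damped faster, as on a real string
        out += amp * decay * np.sin(2 * np.pi * freq * t)
    return out / np.max(np.abs(out))             # normalize to the -1..1 range

audio = modal_pluck()  # a two-second low-E pluck, ready to be written to a WAV file
```

A full modal engine layers many refinements on top of this skeleton, such as inharmonicity, per-finger excitation models and string-to-pickup coupling, which is exactly the kind of detail the paragraphs above attribute to MODO BASS.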

    -

    -

    MODO BASS comes with 12 bass models that cover a wide range of bass sounds, from vintage to modern, from classic to exotic. Each model has been carefully crafted to reproduce the sound and feel of a specific bass instrument. You can also customize each model by changing various aspects of the instrument, such as the number and type of strings, the scale length, the tuning, the gauge, the construction, the action, the age, the pickups, the electronics, the EQ and even the piezo. You can also add effects and amps to further shape your tone.

    -

    How to install MODO BASS

    -

    Requirements

    -

    To install and use MODO BASS on your computer, you will need the following requirements:

    -
      -
    • A 64-bit Windows or Mac OS X operating system
    • -
    • A 64-bit compatible host application that supports VST2, VST3, AU or AAX plugin formats
    • -
    • An internet connection for download and authorization
    • -
    • At least 8 GB of RAM
    • -
    • At least 6 GB of hard disk space
    • -
    • An ASIO compatible sound card for Windows or CoreAudio compatible sound card for Mac OS X
    • -
    -

    Steps

    -

    To install and activate MODO BASS on your computer, follow these steps:

    1. Download the MODO BASS installer from IK Multimedia's website or from a trusted source. Make sure you download the correct version for your operating system.
    2. Extract the downloaded file using a program like WinRAR or 7-Zip. You should see a folder named MODO BASS with two subfolders: Installer and Keygen.
    3. Open the Installer folder and run the setup.exe file for Windows or the MODO BASS.pkg file for Mac OS X. Follow the instructions on the screen to install MODO BASS on your computer.
    4. Open the Keygen folder and run the keygen.exe file for Windows or the keygen.app file for Mac OS X. You should see a window with a serial number and an authorization code.
    5. Launch your host application and load MODO BASS as a plugin. You should see a window asking you to authorize MODO BASS.
    6. Enter the serial number and the authorization code from the keygen window into the corresponding fields in the authorization window. Click on Authorize.
    7. You should see a message confirming that MODO BASS has been successfully authorized. Click on OK.
    8. You can now use MODO BASS as a plugin in your host application.

    How to use MODO BASS


    Interface


    MODO BASS has a simple and intuitive user interface that consists of four main sections: Control, Play Style, Model and FX.


    The Control section is located at the top of the interface and contains global settings for MODO BASS, such as volume, pan, master tune, MIDI channel, polyphony and output mode. You can also access the settings menu, the help menu and the preset browser from this section.


    The Play Style section is located at the left side of the interface and contains settings for the way MODO BASS plays the bass, such as the play style, the hand position, the finger used, the force applied, the muting and the slide. You can also access the MIDI learn function from this section.


    The Model section is located at the center of the interface and contains settings for the bass model selected, such as the model name, the string number, the scale length, the tuning, the gauge, the construction, the action, the age, the pickups, the electronics, the EQ and the piezo. You can also access the model browser from this section.


    The FX section is located at the right side of the interface and contains settings for the effects and amps added to MODO BASS, such as the effect type, the effect parameters, the amp model, the amp parameters and the cabinet type. You can also access the FX browser from this section.


    Models


    MODO BASS comes with 12 bass models that cover a wide range of bass sounds, from vintage to modern, from classic to exotic. Each model has been carefully crafted to reproduce the sound and feel of a specific bass instrument. Here is a brief description of each model:

    • Jazz Bass 60s: A classic Fender Jazz Bass from 1960 with alder body, rosewood fingerboard and single-coil pickups. It has a bright and punchy tone that is ideal for funk, rock and pop.
    • Jazz Bass 70s: A classic Fender Jazz Bass from 1970 with ash body, maple fingerboard and single-coil pickups. It has a warmer and smoother tone that is ideal for jazz, fusion and soul.
    • Precision Bass: A classic Fender Precision Bass from 1951 with ash body, maple fingerboard and split-coil pickup. It has a fat and powerful tone that is ideal for blues, rock and metal.
    • Rickenbacker 4003: A classic Rickenbacker 4003 from 1977 with maple body, maple fingerboard and dual-coil pickups. It has a distinctive and aggressive tone that is ideal for rock, punk and prog.
    • Lakland 55-94: A modern Lakland 55-94 from 2009 with ash body, maple fingerboard and humbucker pickups. It has a versatile and balanced tone that is ideal for any genre.
    • Gibson EB-0: A vintage Gibson EB-0 from 1959 with mahogany body, rosewood fingerboard and single-coil pickup. It has a dark and mellow tone that is ideal for blues, country and folk.
    • Hofner Violin Bass: A vintage Hofner Violin Bass from 1963 with spruce top, maple back and sides, rosewood fingerboard and dual-coil pickups. It has a woody and hollow tone that is ideal for pop, rockabilly and reggae.
    • Music Man StingRay 5: A modern Music Man StingRay 5 from 2010 with ash body, maple fingerboard and humbucker pickup. It has a crisp and clear tone that is ideal for funk, disco and R&B.
    • Warwick Streamer: A modern Warwick Streamer from 2002 with bubinga body, wenge fingerboard and dual-coil pickups. It has a rich and deep tone that is ideal for jazz, fusion and ambient.
    • Fodera Yin Yang: A custom Fodera Yin Yang from 2010 with alder body, ebony fingerboard and humbucker pickups. It has a smooth and articulate tone that is ideal for soloing, tapping and slapping.
    • Danelectro Longhorn: A vintage Danelectro Longhorn from 1958 with masonite body, rosewood fingerboard and lipstick pickups. It has a twangy and quirky tone that is ideal for surf, rockabilly and indie.
    • Japan Bass: A custom Japan Bass from 1994 with alder body, rosewood fingerboard and single-coil pickups. It has a bright and snappy tone that is ideal for pop-punk, ska and alternative.

    Play Styles


    MODO BASS allows you to choose between three play styles: pluck, slap and pick. Each play style has its own characteristics and sound, and you can adjust various parameters to fine-tune your performance. Here is a brief description of each play style:

    • Pluck: This is the most common and natural way of playing bass, using the fingers to pluck the strings. You can control the position of the hand, the finger used, the force applied and the muting of the strings. You can also switch between index and middle finger, or use both for faster playing.
    • Slap: This is a more aggressive and funky way of playing bass, using the thumb to slap the strings and the fingers to pop them. You can control the position of the hand, the force applied, the muting of the strings and the ratio of slap and pop. You can also switch between thumb and index finger, or use both for faster playing.
    • Pick: This is a more articulate and bright way of playing bass, using a pick to strike the strings. You can control the position of the hand, the force applied, the muting of the strings and the angle of the pick. You can also switch between upstroke and downstroke, or use both for faster playing.

    Customization


    MODO BASS allows you to customize various aspects of your bass model, giving you endless possibilities to create your own unique sound. You can access the customization options by clicking on the Model button in the Model section. Here are some of the customization options available:

    • String Number: You can choose between 4, 5 or 6 strings for your bass model.
    • Scale: You can choose between short, medium or long scale for your bass model.
    • Tuning: You can tune each string individually or use presets for standard or alternative tunings.
    • Gauge: You can choose between light, medium or heavy gauge for your strings (a short string-physics sketch after this list shows how scale, tuning and gauge interact).
    • Construction: You can choose between bolt-on, neck-through or set-neck for your bass model.
    • Action: You can adjust the action of your strings, which affects the playability and intonation of your bass model.
    • Age: You can adjust the age of your strings, which affects the brightness and sustain of your bass model.
    • Pickups: You can choose between single-coil, dual-coil or humbucker pickups for your bass model. You can also adjust the position, volume and phase of each pickup.
    • Electronics: You can choose between passive or active electronics for your bass model. You can also adjust the tone and boost/cut of each frequency band.
    • EQ: You can apply a 4-band parametric EQ to your bass model. You can adjust the frequency, gain and Q of each band.
    • Piezo: You can add a piezo pickup to your bass model. You can adjust the volume and tone of the piezo signal.
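
    The scale, tuning and gauge options above interact through basic string physics: for an ideal string, the sounding frequency follows f = (1/2L)·sqrt(T/μ), where L is the scale length, T the tension and μ the mass per unit length set by the gauge. As a rough illustration only (the linear-density figure below is a placeholder, and MODO BASS's own model is far more detailed), this sketch estimates the tension needed to bring a string to pitch.

        # Rough string-physics illustration: tension required for a given pitch,
        # scale length and string gauge. The linear density is a placeholder value.
        def required_tension(freq_hz=41.2, scale_m=0.864, linear_density_kg_m=0.03):
            # Mersenne's law rearranged: T = mu * (2 * L * f)^2
            return linear_density_kg_m * (2 * scale_m * freq_hz) ** 2

        print(f"{required_tension():.0f} N")  # roughly 150 N (~34 lbf) for a low E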

    Effects and Amps


    MODO BASS comes with seven bass stomp box effects and two bass amps that you can add to your signal chain to further shape your tone. You can access the effects and amps options by clicking on the FX button in the FX section. Here are some of the effects and amps available:

    • Octaver: This effect adds an octave below or above your original signal, creating a fuller and richer sound. You can adjust the level of the sub and upper octaves, as well as the dry signal.
    • Envelope Filter: This effect applies a filter that modulates according to the envelope of your signal, creating a funky and expressive sound. You can adjust the sensitivity, range, resonance and mode of the filter.
    • Chorus: This effect adds a modulated delay to your signal, creating a shimmering and spacious sound. You can adjust the rate, depth, feedback and mix of the chorus.
    • Compressor: This effect reduces the dynamic range of your signal, creating a more consistent and punchy sound. You can adjust the threshold, ratio, attack, release and gain of the compressor (a generic gain-curve sketch follows this list).
    • Distortion: This effect adds harmonic distortion to your signal, creating a more aggressive and edgy sound. You can adjust the drive, tone, level and type of the distortion.
    • Phaser: This effect adds a phase-shifted copy of your signal, creating a swirling and psychedelic sound. You can adjust the rate, depth, feedback and mix of the phaser.
    • Delay: This effect adds a delayed copy of your signal, creating a more ambient and spacious sound. You can adjust the time, feedback, level and sync of the delay.
    • Bass Amp 1: This amp simulates a classic Ampeg SVT-VR tube bass amp, which has a warm and powerful tone. You can adjust the gain, bass, mid, treble and volume of the amp.
    • Bass Amp 2: This amp simulates a modern Gallien-Krueger 800RB solid-state bass amp, which has a crisp and clear tone. You can adjust the gain, contour, presence, bass, mid, treble and volume of the amp.
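
    The compressor controls above follow the standard dynamics-processing gain law, and the sketch below (referenced from the Compressor entry) implements that idea for a simple feedforward, hard-knee design. It is a generic illustration of what threshold and ratio do, not the DSP actually used inside MODO BASS.

        # Generic hard-knee compressor: levels above the threshold are reduced by
        # the ratio. Illustrative only; not MODO BASS's actual algorithm.
        import numpy as np

        def gain_reduction_db(level_db, threshold_db=-18.0, ratio=4.0):
            level_db = np.asarray(level_db, dtype=float)
            over = np.maximum(level_db - threshold_db, 0.0)  # dB above threshold
            out_db = threshold_db + over / ratio             # compressed output level
            return np.where(level_db > threshold_db, out_db - level_db, 0.0)

        print(gain_reduction_db([-30.0, -18.0, -6.0, 0.0]))  # 0, 0, -9, -13.5 dB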

    How to get the best out of MODO BASS


    MODO BASS is a very versatile and realistic bass virtual instrument that can suit any genre and scenario. However, there are some tips and tricks that can help you get the best out of MODO BASS for your bass tracks. Here are some of them:

    • Experiment with different models and customization options: MODO BASS offers a wide range of bass sounds that you can explore and tweak to your liking. Try different models and see how they sound in your mix. Change various aspects of the instrument and see how they affect the tone and feel. Find the best combination that matches your vision and style.
    • Use different play styles and expressions: MODO BASS allows you to choose between three play styles: pluck, slap and pick. Each play style has its own characteristics and sound, and you can adjust various parameters to fine-tune your performance. You can also use MIDI controllers or keyboard shortcuts to switch between play styles or add expressions such as slides, bends, vibrato, harmonics or mutes. Use different play styles and expressions to add variety and realism to your bass tracks.
    • Add effects and amps to shape your tone: MODO BASS comes with seven bass stomp box effects and two bass amps that you can add to your signal chain to further shape your tone. You can also use external effects or amps if you prefer. Use effects and amps to enhance or transform your bass sound according to your genre and mood.
    • Use MIDI learn to control MODO BASS with your hardware: MODO BASS supports a MIDI learn function that allows you to assign any parameter in MODO BASS to any MIDI controller on your hardware device. This way, you can control MODO BASS with your hardware in real time, adding more expression and interactivity to your performance. To use the MIDI learn function, click on the MIDI button in the Play Style section and follow the instructions on the screen (a minimal example of the kind of MIDI message involved follows this list).
    • Use presets to get started or inspired: MODO BASS comes with over 300 presets that cover various genres and styles, from rock to jazz, from funk to metal, from pop to reggae. You can access the preset browser by clicking on the Preset button in the Control section. You can use presets to get started quickly or to get inspired by different sounds and settings. You can also create your own presets and save them for later use.
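
    For readers curious about what the MIDI learn feature is actually listening for, the sketch below sends a stream of Control Change messages using the mido Python library. The port name and CC number are placeholder assumptions for illustration; in practice you would simply move a knob on your controller while MIDI learn is armed.

        # Send MIDI Control Change messages, the kind of event MIDI learn maps to a
        # parameter. Port name and CC number are placeholders, not MODO BASS values.
        import time
        import mido

        with mido.open_output('My MIDI Device 1') as port:  # hypothetical port name
            for value in range(0, 128, 8):                   # sweep the control
                port.send(mido.Message('control_change',
                                       channel=0, control=20, value=value))
                time.sleep(0.05)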

    Conclusion


    MODO BASS is a revolutionary bass virtual instrument that uses real-time modal synthesis to recreate the sound and feel of a real bass player playing an instrument. It offers 12 bass models that cover a wide range of bass sounds, from vintage to modern, from classic to exotic, and lets you customize your instrument in depth: string number, scale, tuning, gauge, construction, action, age, pickups, electronics, EQ and piezo. Seven bass stomp box effects and two bass amps are included to further shape your tone, and you can choose between three play styles (pluck, slap and pick) or use MIDI controllers and keyboard shortcuts to switch play styles and add expressions such as slides, bends, vibrato, harmonics or mutes.


    If you are looking for a realistic and versatile bass virtual instrument that can suit any genre and scenario, you might want to check out IK Multimedia's MODO BASS. You can download and install MODO BASS on your Windows or Mac OS X computer using a keygen file. You can also use the tips and tricks we provided in this article to get the best out of MODO BASS for your bass tracks.


    So what are you waiting for? Grab your MODO BASS today and start making some amazing bass sounds!


    FAQs


    Here are some frequently asked questions about MODO BASS:

    • Q: How much does MODO BASS cost?
    • A: MODO BASS costs $299.99 USD. However, you can get it for a lower price if you are eligible for any of IK Multimedia's special offers or discounts.
    • Q: Can I use MODO BASS as a standalone application?
    • A: No, MODO BASS is not a standalone application. It is a plugin that requires a 64-bit compatible host application that supports VST2, VST3, AU or AAX plugin formats.
    • Q: Can I use MODO BASS with any MIDI controller?
    • A: Yes, you can use MODO BASS with any MIDI controller that can send MIDI messages to your host application. You can also use the MIDI learn function to assign any parameter in MODO BASS to any MIDI controller on your hardware device.
    • Q: Can I import my own samples into MODO BASS?
    • A: No, MODO BASS does not use samples to produce sound. It uses real-time modal synthesis to model the physical properties of a real instrument.
    • Q: Can I export my MODO BASS presets to another computer?
    • A: Yes, you can export your MODO BASS presets to another computer by copying the preset files from the MODO BASS folder on your computer and pasting them into the same folder on another computer.

    \ No newline at end of file diff --git a/spaces/tomandandy/MusicGen3/audiocraft/data/audio_dataset.py b/spaces/tomandandy/MusicGen3/audiocraft/data/audio_dataset.py deleted file mode 100644 index cf21422ea0059cb2d6553f93e608b8f9fa0d3a50..0000000000000000000000000000000000000000 --- a/spaces/tomandandy/MusicGen3/audiocraft/data/audio_dataset.py +++ /dev/null @@ -1,525 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import copy -from concurrent.futures import ThreadPoolExecutor, Future -from dataclasses import dataclass, fields -from contextlib import ExitStack -import gzip -import json -import logging -import os -from pathlib import Path -import random -import sys -import typing as tp - -import torch -import torch.nn.functional as F - -from .audio import audio_read, audio_info -from .audio_utils import convert_audio -from .zip import PathInZip - -try: - import dora -except ImportError: - dora = None # type: ignore - - -@dataclass(order=True) -class BaseInfo: - - @classmethod - def _dict2fields(cls, dictionary: dict): - return { - field.name: dictionary[field.name] - for field in fields(cls) if field.name in dictionary - } - - @classmethod - def from_dict(cls, dictionary: dict): - _dictionary = cls._dict2fields(dictionary) - return cls(**_dictionary) - - def to_dict(self): - return { - field.name: self.__getattribute__(field.name) - for field in fields(self) - } - - -@dataclass(order=True) -class AudioMeta(BaseInfo): - path: str - duration: float - sample_rate: int - amplitude: tp.Optional[float] = None - weight: tp.Optional[float] = None - # info_path is used to load additional information about the audio file that is stored in zip files. - info_path: tp.Optional[PathInZip] = None - - @classmethod - def from_dict(cls, dictionary: dict): - base = cls._dict2fields(dictionary) - if 'info_path' in base and base['info_path'] is not None: - base['info_path'] = PathInZip(base['info_path']) - return cls(**base) - - def to_dict(self): - d = super().to_dict() - if d['info_path'] is not None: - d['info_path'] = str(d['info_path']) - return d - - -@dataclass(order=True) -class SegmentInfo(BaseInfo): - meta: AudioMeta - seek_time: float - n_frames: int # actual number of frames without padding - total_frames: int # total number of frames, padding included - sample_rate: int # actual sample rate - - -DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a'] - -logger = logging.getLogger(__name__) - - -def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta: - """AudioMeta from a path to an audio file. - - Args: - file_path (str): Resolved path of valid audio file. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - Returns: - AudioMeta: Audio file path and its metadata. - """ - info = audio_info(file_path) - amplitude: tp.Optional[float] = None - if not minimal: - wav, sr = audio_read(file_path) - amplitude = wav.abs().max().item() - return AudioMeta(file_path, info.duration, info.sample_rate, amplitude) - - -def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta: - """If Dora is available as a dependency, try to resolve potential relative paths - in list of AudioMeta. This method is expected to be used when loading meta from file. - - Args: - m (AudioMeta): Audio meta to resolve. 
- fast (bool): If True, uses a really fast check for determining if a file is already absolute or not. - Only valid on Linux/Mac. - Returns: - AudioMeta: Audio meta with resolved path. - """ - def is_abs(m): - if fast: - return str(m)[0] == '/' - else: - os.path.isabs(str(m)) - - if not dora: - return m - - if not is_abs(m.path): - m.path = dora.git_save.to_absolute_path(m.path) - if m.info_path is not None and not is_abs(m.info_path.zip_path): - m.info_path.zip_path = dora.git_save.to_absolute_path(m.path) - return m - - -def find_audio_files(path: tp.Union[Path, str], - exts: tp.List[str] = DEFAULT_EXTS, - resolve: bool = True, - minimal: bool = True, - progress: bool = False, - workers: int = 0) -> tp.List[AudioMeta]: - """Build a list of AudioMeta from a given path, - collecting relevant audio files and fetching meta info. - - Args: - path (str or Path): Path to folder containing audio files. - exts (list of str): List of file extensions to consider for audio files. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - progress (bool): Whether to log progress on audio files collection. - workers (int): number of parallel workers, if 0, use only the current thread. - Returns: - List[AudioMeta]: List of audio file path and its metadata. - """ - audio_files = [] - futures: tp.List[Future] = [] - pool: tp.Optional[ThreadPoolExecutor] = None - with ExitStack() as stack: - if workers > 0: - pool = ThreadPoolExecutor(workers) - stack.enter_context(pool) - - if progress: - print("Finding audio files...") - for root, folders, files in os.walk(path, followlinks=True): - for file in files: - full_path = Path(root) / file - if full_path.suffix.lower() in exts: - audio_files.append(full_path) - if pool is not None: - futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal)) - if progress: - print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr) - - if progress: - print("Getting audio metadata...") - meta: tp.List[AudioMeta] = [] - for idx, file_path in enumerate(audio_files): - try: - if pool is None: - m = _get_audio_meta(str(file_path), minimal) - else: - m = futures[idx].result() - if resolve: - m = _resolve_audio_meta(m) - except Exception as err: - print("Error with", str(file_path), err, file=sys.stderr) - continue - meta.append(m) - if progress: - print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr) - meta.sort() - return meta - - -def load_audio_meta(path: tp.Union[str, Path], - resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]: - """Load list of AudioMeta from an optionally compressed json file. - - Args: - path (str or Path): Path to JSON file. - resolve (bool): Whether to resolve the path from AudioMeta (default=True). - fast (bool): activates some tricks to make things faster. - Returns: - List[AudioMeta]: List of audio file path and its total duration. - """ - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'rb') as fp: # type: ignore - lines = fp.readlines() - meta = [] - for line in lines: - d = json.loads(line) - m = AudioMeta.from_dict(d) - if resolve: - m = _resolve_audio_meta(m, fast=fast) - meta.append(m) - return meta - - -def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]): - """Save the audio metadata to the file pointer as json. - - Args: - path (str or Path): Path to JSON file. - metadata (list of BaseAudioMeta): List of audio meta to save. 
- """ - Path(path).parent.mkdir(exist_ok=True, parents=True) - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'wb') as fp: # type: ignore - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - json_bytes = json_str.encode('utf-8') - fp.write(json_bytes) - - -class AudioDataset: - """Base audio dataset. - - The dataset takes a list of AudioMeta and create a dataset composed of segments of audio - and potentially additional information, by creating random segments from the list of audio - files referenced in the metadata and applying minimal data pre-processing such as resampling, - mixing of channels, padding, etc. - - If no segment_duration value is provided, the AudioDataset will return the full wav for each - audio file. Otherwise, it will randomly sample audio files and create a segment of the specified - duration, applying padding if required. - - By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True - allows to return a tuple containing the torch Tensor and additional metadata on the segment and the - original audio meta. - - Args: - meta (tp.List[AudioMeta]): List of audio files metadata. - segment_duration (float): Optional segment duration of audio to load. - If not specified, the dataset will load the full audio segment from the file. - shuffle (bool): Set to `True` to have the data reshuffled at every epoch. - sample_rate (int): Target sample rate of the loaded audio samples. - channels (int): Target number of channels of the loaded audio samples. - sample_on_duration (bool): Set to `True` to sample segments with probability - dependent on audio file duration. This is only used if `segment_duration` is provided. - sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of - `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product - of the file duration and file weight. This is only used if `segment_duration` is provided. - min_segment_ratio (float): Minimum segment ratio to use when the audio file - is shorter than the desired segment. - max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset. - return_info (bool): Whether to return the wav only or return wav along with segment info and metadata. - min_audio_duration (tp.Optional[float], optional): Minimum audio file duration, in seconds, if provided - audio shorter than this will be filtered out. - max_audio_duration (tp.Optional[float], optional): Maximal audio file duration in seconds, if provided - audio longer than this will be filtered out. - """ - def __init__(self, - meta: tp.List[AudioMeta], - segment_duration: tp.Optional[float] = None, - shuffle: bool = True, - num_samples: int = 10_000, - sample_rate: int = 48_000, - channels: int = 2, - pad: bool = True, - sample_on_duration: bool = True, - sample_on_weight: bool = True, - min_segment_ratio: float = 0.5, - max_read_retry: int = 10, - return_info: bool = False, - min_audio_duration: tp.Optional[float] = None, - max_audio_duration: tp.Optional[float] = None - ): - assert len(meta) > 0, 'No audio meta provided to AudioDataset. Please check loading of audio meta.' 
- assert segment_duration is None or segment_duration > 0 - assert segment_duration is None or min_segment_ratio >= 0 - logging.debug(f'sample_on_duration: {sample_on_duration}') - logging.debug(f'sample_on_weight: {sample_on_weight}') - logging.debug(f'pad: {pad}') - logging.debug(f'min_segment_ratio: {min_segment_ratio}') - - self.segment_duration = segment_duration - self.min_segment_ratio = min_segment_ratio - self.max_audio_duration = max_audio_duration - self.min_audio_duration = min_audio_duration - if self.min_audio_duration is not None and self.max_audio_duration is not None: - assert self.min_audio_duration <= self.max_audio_duration - self.meta: tp.List[AudioMeta] = self._filter_duration(meta) - assert len(self.meta) # Fail fast if all data has been filtered. - self.total_duration = sum(d.duration for d in self.meta) - - if segment_duration is None: - num_samples = len(self.meta) - self.num_samples = num_samples - self.shuffle = shuffle - self.sample_rate = sample_rate - self.channels = channels - self.pad = pad - self.sample_on_weight = sample_on_weight - self.sample_on_duration = sample_on_duration - self.sampling_probabilities = self._get_sampling_probabilities() - self.max_read_retry = max_read_retry - self.return_info = return_info - - def __len__(self): - return self.num_samples - - def _get_sampling_probabilities(self, normalized: bool = True): - """Return the sampling probabilities for each file inside `self.meta`. - """ - scores: tp.List[float] = [] - for file_meta in self.meta: - score = 1. - if self.sample_on_weight and file_meta.weight is not None: - score *= file_meta.weight - if self.sample_on_duration: - score *= file_meta.duration - scores.append(score) - probabilities = torch.tensor(scores) - if normalized: - probabilities /= probabilities.sum() - return probabilities - - def sample_file(self, rng: torch.Generator) -> AudioMeta: - """Sample a given file from `self.meta`. Can be overriden in subclasses. - This is only called if `segment_duration` is not None. - - You must use the provided random number generator `rng` for reproducibility. 
- """ - if not self.sample_on_weight and not self.sample_on_duration: - file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item()) - else: - file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item()) - - return self.meta[file_index] - - def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]: - if self.segment_duration is None: - file_meta = self.meta[index] - out, sr = audio_read(file_meta.path) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames, - sample_rate=self.sample_rate) - else: - rng = torch.Generator() - if self.shuffle: - # We use index, plus extra randomness - rng.manual_seed(index + self.num_samples * random.randint(0, 2**24)) - else: - # We only use index - rng.manual_seed(index) - - for retry in range(self.max_read_retry): - file_meta = self.sample_file(rng) - # We add some variance in the file position even if audio file is smaller than segment - # without ending up with empty segments - max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio) - seek_time = torch.rand(1, generator=rng).item() * max_seek - try: - out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - target_frames = int(self.segment_duration * self.sample_rate) - if self.pad: - out = F.pad(out, (0, target_frames - n_frames)) - segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames, - sample_rate=self.sample_rate) - except Exception as exc: - logger.warning("Error opening file %s: %r", file_meta.path, exc) - if retry == self.max_read_retry - 1: - raise - else: - break - - if self.return_info: - # Returns the wav and additional information on the wave segment - return out, segment_info - else: - return out - - def collater(self, samples): - """The collater function has to be provided to the dataloader - if AudioDataset has return_info=True in order to properly collate - the samples of a batch. - """ - if self.segment_duration is None and len(samples) > 1: - assert self.pad, "Must allow padding when batching examples of different durations." - - # In this case the audio reaching the collater is of variable length as segment_duration=None. - to_pad = self.segment_duration is None and self.pad - if to_pad: - max_len = max([wav.shape[-1] for wav, _ in samples]) - - def _pad_wav(wav): - return F.pad(wav, (0, max_len - wav.shape[-1])) - - if self.return_info: - if len(samples) > 0: - assert len(samples[0]) == 2 - assert isinstance(samples[0][0], torch.Tensor) - assert isinstance(samples[0][1], SegmentInfo) - - wavs = [wav for wav, _ in samples] - segment_infos = [copy.deepcopy(info) for _, info in samples] - - if to_pad: - # Each wav could be of a different duration as they are not segmented. - for i in range(len(samples)): - # Determines the total legth of the signal with padding, so we update here as we pad. 
- segment_infos[i].total_frames = max_len - wavs[i] = _pad_wav(wavs[i]) - - wav = torch.stack(wavs) - return wav, segment_infos - else: - assert isinstance(samples[0], torch.Tensor) - if to_pad: - samples = [_pad_wav(s) for s in samples] - return torch.stack(samples) - - def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: - """Filters out audio files with short durations. - Removes from meta files that have durations that will not allow to samples examples from them. - """ - orig_len = len(meta) - - # Filter data that is too short. - if self.min_audio_duration is not None: - meta = [m for m in meta if m.duration >= self.min_audio_duration] - - # Filter data that is too long. - if self.max_audio_duration is not None: - meta = [m for m in meta if m.duration <= self.max_audio_duration] - - filtered_len = len(meta) - removed_percentage = 100*(1-float(filtered_len)/orig_len) - msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage - if removed_percentage < 10: - logging.debug(msg) - else: - logging.warning(msg) - return meta - - @classmethod - def from_meta(cls, root: tp.Union[str, Path], **kwargs): - """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file. - - Args: - root (str or Path): Path to root folder containing audio files. - kwargs: Additional keyword arguments for the AudioDataset. - """ - root = Path(root) - if root.is_dir(): - if (root / 'data.jsonl').exists(): - root = root / 'data.jsonl' - elif (root / 'data.jsonl.gz').exists(): - root = root / 'data.jsonl.gz' - else: - raise ValueError("Don't know where to read metadata from in the dir. " - "Expecting either a data.jsonl or data.jsonl.gz file but none found.") - meta = load_audio_meta(root) - return cls(meta, **kwargs) - - @classmethod - def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True, - exts: tp.List[str] = DEFAULT_EXTS, **kwargs): - """Instantiate AudioDataset from a path containing (possibly nested) audio files. - - Args: - root (str or Path): Path to root folder containing audio files. - minimal_meta (bool): Whether to only load minimal metadata or not. - exts (list of str): Extensions for audio files. - kwargs: Additional keyword arguments for the AudioDataset. - """ - root = Path(root) - if root.is_file(): - meta = load_audio_meta(root, resolve=True) - else: - meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True) - return cls(meta, **kwargs) - - -def main(): - logging.basicConfig(stream=sys.stderr, level=logging.INFO) - parser = argparse.ArgumentParser( - prog='audio_dataset', - description='Generate .jsonl files by scanning a folder.') - parser.add_argument('root', help='Root folder with all the audio files') - parser.add_argument('output_meta_file', - help='Output file to store the metadata, ') - parser.add_argument('--complete', - action='store_false', dest='minimal', default=True, - help='Retrieve all metadata, even the one that are expansive ' - 'to compute (e.g. 
normalization).') - parser.add_argument('--resolve', - action='store_true', default=False, - help='Resolve the paths to be absolute and with no symlinks.') - parser.add_argument('--workers', - default=10, type=int, - help='Number of workers.') - args = parser.parse_args() - meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True, - resolve=args.resolve, minimal=args.minimal, workers=args.workers) - save_audio_meta(args.output_meta_file, meta) - - -if __name__ == '__main__': - main() diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/backbone/resnet34.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/backbone/resnet34.py deleted file mode 100644 index e2c43147a91e723f8acbbc75e745aef9805a96f7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/backbone/resnet34.py +++ /dev/null @@ -1,97 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn -import math -from maskrcnn_benchmark.layers import FrozenBatchNorm2d -from maskrcnn_benchmark.layers import Conv2d - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = FrozenBatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = FrozenBatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block=BasicBlock, layers=[3, 4, 6, 3]): - self.inplanes = 64 - super(ResNet, self).__init__() - self.conv1 = Conv2d(3, 64, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = FrozenBatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - - for m in self.modules(): - if isinstance(m, Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - elif isinstance(m, FrozenBatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - FrozenBatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x2 = self.layer1(x) - x3 = self.layer2(x2) - x4 = self.layer3(x3) - x5 = self.layer4(x4) - return [x2, x3, x4, x5] diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/demo/create_result_gif.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/demo/create_result_gif.py deleted file mode 100644 index 6646c6b3d45c36f5d356a76aea97fe9a5a9cee06..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/demo/create_result_gif.py +++ /dev/null @@ -1,162 +0,0 @@ -import argparse -import os -import os.path as osp - -import matplotlib.patches as mpatches -import matplotlib.pyplot as plt -import mmcv -import numpy as np - -try: - import imageio -except ImportError: - imageio = None - - -def parse_args(): - parser = argparse.ArgumentParser(description='Create GIF for demo') - parser.add_argument( - 'image_dir', - help='directory where result ' - 'images save path generated by ‘analyze_results.py’') - parser.add_argument( - '--out', - type=str, - default='result.gif', - help='gif path where will be saved') - args = parser.parse_args() - return args - - -def _generate_batch_data(sampler, batch_size): - batch = [] - for idx in sampler: - batch.append(idx) - if len(batch) == batch_size: - yield batch - batch = [] - if len(batch) > 0: - yield batch - - -def create_gif(frames, gif_name, duration=2): - """Create gif through imageio. - - Args: - frames (list[ndarray]): Image frames - gif_name (str): Saved gif name - duration (int): Display interval (s), - Default: 2 - """ - if imageio is None: - raise RuntimeError('imageio is not installed,' - 'Please use “pip install imageio” to install') - imageio.mimsave(gif_name, frames, 'GIF', duration=duration) - - -def create_frame_by_matplotlib(image_dir, - nrows=1, - fig_size=(300, 300), - font_size=15): - """Create gif frame image through matplotlib. - - Args: - image_dir (str): Root directory of result images - nrows (int): Number of rows displayed, Default: 1 - fig_size (tuple): Figure size of the pyplot figure. - Default: (300, 300) - font_size (int): Font size of texts. 
Default: 15 - - Returns: - list[ndarray]: image frames - """ - - result_dir_names = os.listdir(image_dir) - assert len(result_dir_names) == 2 - # Longer length has higher priority - result_dir_names.reverse() - - images_list = [] - for dir_names in result_dir_names: - images_list.append(mmcv.scandir(osp.join(image_dir, dir_names))) - - frames = [] - for paths in _generate_batch_data(zip(*images_list), nrows): - - fig, axes = plt.subplots(nrows=nrows, ncols=2) - fig.suptitle('Good/bad case selected according ' - 'to the COCO mAP of the single image') - - det_patch = mpatches.Patch(color='salmon', label='prediction') - gt_patch = mpatches.Patch(color='royalblue', label='ground truth') - # bbox_to_anchor may need to be finetuned - plt.legend( - handles=[det_patch, gt_patch], - bbox_to_anchor=(1, -0.18), - loc='lower right', - borderaxespad=0.) - - if nrows == 1: - axes = [axes] - - dpi = fig.get_dpi() - # set fig size and margin - fig.set_size_inches( - (fig_size[0] * 2 + fig_size[0] // 20) / dpi, - (fig_size[1] * nrows + fig_size[1] // 3) / dpi, - ) - - fig.tight_layout() - # set subplot margin - plt.subplots_adjust( - hspace=.05, - wspace=0.05, - left=0.02, - right=0.98, - bottom=0.02, - top=0.98) - - for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)): - image_path_left = osp.join( - osp.join(image_dir, result_dir_names[0], path_tuple[0])) - image_path_right = osp.join( - osp.join(image_dir, result_dir_names[1], path_tuple[1])) - image_left = mmcv.imread(image_path_left) - image_left = mmcv.rgb2bgr(image_left) - image_right = mmcv.imread(image_path_right) - image_right = mmcv.rgb2bgr(image_right) - - if i == 0: - ax_tuple[0].set_title( - result_dir_names[0], fontdict={'size': font_size}) - ax_tuple[1].set_title( - result_dir_names[1], fontdict={'size': font_size}) - ax_tuple[0].imshow( - image_left, extent=(0, *fig_size, 0), interpolation='bilinear') - ax_tuple[0].axis('off') - ax_tuple[1].imshow( - image_right, - extent=(0, *fig_size, 0), - interpolation='bilinear') - ax_tuple[1].axis('off') - - canvas = fig.canvas - s, (width, height) = canvas.print_to_buffer() - buffer = np.frombuffer(s, dtype='uint8') - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - img = rgb.astype('uint8') - - frames.append(img) - - return frames - - -def main(): - args = parse_args() - frames = create_frame_by_matplotlib(args.image_dir) - create_gif(frames, args.out) - - -if __name__ == '__main__': - main() diff --git a/spaces/tracinginsights/api/models.py b/spaces/tracinginsights/api/models.py deleted file mode 100644 index cda14a68e399f2dd275c153b4f773c0255cf36e6..0000000000000000000000000000000000000000 --- a/spaces/tracinginsights/api/models.py +++ /dev/null @@ -1,103 +0,0 @@ -from sqlalchemy import Column, Float, Integer, String -from sqlalchemy.ext.declarative import declarative_base - -# from . 
import database - -import database - - -class RacePace(database.Base): - __tablename__ = "race_pace" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - event = Column(String) - session = Column(String) - Driver = Column(String) - LapTime = Column(Float) - Diff = Column(Float) - Team = Column(String) - fill = Column(String) - - -class TopSpeed(database.Base): - __tablename__ = "top_speed" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - event = Column(String) - session = Column(String) - driver = Column(String) - top_speed = Column(Integer) - compound = Column(String) - team = Column(String) - fill = Column(String) - - -class Overtakes(database.Base): - __tablename__ = "overtakes" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - event = Column(String) - driver = Column(String) - overtakes = Column(Integer) - overtaken = Column(Integer) - - -class FastestLap(database.Base): - __tablename__ = "fastest_lap" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - event = Column(String) - session = Column(String) - driver = Column(String) - lap_time = Column(Float) - diff = Column(Float) - team = Column(String) - fill = Column(String) - - -class DriverStandings(database.Base): - __tablename__ = "driver_standings" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - driver = Column(String) - points = Column(Integer) - car = Column(String) - fill = Column(String) - - -class AvailableYears(database.Base): - __tablename__ = "available_years" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - - -class AvailableEvents(database.Base): - __tablename__ = "available_events" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - event = Column(String) - - -class AvailableSessions(database.Base): - __tablename__ = "available_sessions" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - year = Column(Integer) - event = Column(String) - session = Column(String) - - -class AvailableDrivers(database.Base): - __tablename__ = "available_drivers" - - id = Column(Integer, primary_key=True, autoincrement=True, index=True) - color = Column(String) - label = Column(String) - value = Column(String) diff --git a/spaces/tumuyan/vits-miki/modules.py b/spaces/tumuyan/vits-miki/modules.py deleted file mode 100644 index f5af1fd9a20dc03707889f360a39bb4b784a6df3..0000000000000000000000000000000000000000 --- a/spaces/tumuyan/vits-miki/modules.py +++ /dev/null @@ -1,387 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, 
hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = 
torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, 
x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/typ12323/bingo/Dockerfile b/spaces/typ12323/bingo/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/typ12323/bingo/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start diff --git a/spaces/unity/ML-Agents-PushBlock/style.css b/spaces/unity/ML-Agents-PushBlock/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/unity/ML-Agents-PushBlock/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/usbethFlerru/sovits-modelsV2/Vray-For-Revit-2015-Crack.md b/spaces/usbethFlerru/sovits-modelsV2/Vray-For-Revit-2015-Crack.md deleted file mode 100644 index 2262a1fd496d3baea7accd49530429cd1e379f97..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/Vray-For-Revit-2015-Crack.md +++ /dev/null @@ -1,126 +0,0 @@ -## Vray For Revit 2015 Crack - - - - - - ![Vray For Revit 2015 Crack](https://1.bp.blogspot.com/-_fjGhjDcGbg/UQCubeSJwII/AAAAAAAAeN0/amV39pv5i9k/s800/RayFire%2B1.61%2Btutorial%2Bcaching%2Bin%2BRayFire.jpg) - - - - - -**Download ⭐ [https://searchdisvipas.blogspot.com/?download=2txnOq](https://searchdisvipas.blogspot.com/?download=2txnOq)** - - - - - - - - - - - - - -# How to Download and Install V-Ray Next for Revit 2015-2020 - - - -V-Ray Next for Revit is a powerful rendering software that lets you create professional, high-resolution images with realistic lights, materials, and cameras. It is fast to set up, easy to use, and compatible with Revit 2015-2020 versions. In this article, we will show you how to download and install V-Ray Next for Revit 2015-2020 with a crack file. - - - -## Step 1: Download V-Ray Next for Revit 2015-2020 - - - -You can download V-Ray Next for Revit 2015-2020 from various websites that offer free software downloads. For example, you can use the following link[^1^] to download the offline installer setup file: - - - -[Download V-Ray Next Build 4.00.03 for Revit 2015-2020 - Get Into PC](https://getintopc.com/softwares/3d-designing/download-v-ray-next-build-4-00-03-for-revit-2015-2020/) - - - -The file size is about 850 MB and it contains the V-Ray Next Build 4.00.03 for Revit 2015-2020 software and the crack file. - - - -## Step 2: Install V-Ray Next for Revit 2015-2020 - - - -After downloading the file, you need to extract it using a software like WinRAR or 7-Zip. 
Then, you need to run the setup file as administrator and follow the installation wizard. You can choose the destination folder and the components you want to install. Uncheck the option to install License Server as you will use the crack file instead. - - - -When the installation is finished, do not run the software yet. You need to copy and replace some files in the installation directory to activate V-Ray Next for Revit 2015-2020. - - - -## Step 3: Crack V-Ray Next for Revit 2015-2020 - - - -To crack V-Ray Next for Revit 2015-2020, you need to copy and replace two files from the crack folder that you downloaded. The files are cgauth.dll and vray\_BRDFScanned.dll. You need to paste them in the following directories: - - - -- C:\Program Files\Chaos Group\V-Ray\V-Ray for Revit\libraries\vrayappsdk\bin\ - -- C:\Program Files\Chaos Group\V-Ray\V-Ray for Revit\bin\ - - - -Make sure you overwrite the existing files when prompted. This will activate V-Ray Next for Revit 2015-2020 and allow you to use it without any limitations. - - - -## Step 4: Enjoy V-Ray Next for Revit 2015-2020 - - - -Now you can run V-Ray Next for Revit 2015-2020 from your desktop shortcut or from the Revit toolbar. You can render your Revit models with high quality and realism using V-Ray Next's features and settings. You can also watch this video[^3^] for a visual demonstration of the installation and activation process. - - - -We hope this article was helpful and informative. If you have any questions or problems, please leave a comment below or contact us through our website. - - - -## Step 5: Explore V-Ray Next for Revit 2015-2020 Features - - - -V-Ray Next for Revit 2015-2020 has many features and improvements that will help you create stunning renders of your Revit models. Here are some of the features you can explore: - - - -- V-Ray Vision: With V-Ray Vision, you can get real-time high-quality visual feedback of your model as you set up materials, lights, and cameras. You can also take your design to the next level with Chaos Vantage Live Link, and explore even the most complex model in pure ray-traced real-time[^2^]. - -- Asset Editor: With the Asset Editor, you can create, edit, save and manage your V-Ray materials all in one place. You can also assign materials to new proxy objects, save them in the Editor and reuse them in any project[^2^]. - -- Appearance Manager: With the Appearance Manager, you can set basic Revit assets to automatically switch to more realistic V-Ray assets at render time. This will enhance the quality and realism of your renders without affecting your BIM database[^1^]. - -- Decals: With support for Decals, you can add images to paintings, signs, screens and more. You can also adjust the size, position and orientation of the decals easily[^1^]. - -- Lighting Analysis: With the Lighting Analysis render element, you can easily visualize real-world illumination values (lux) in your scene. This will help you analyze and optimize the lighting design of your project[^2^]. - -- Denoiser: With the NVIDIA AI Denoiser, you can use artificial intelligence to remove noise in an instant. This will speed up your rendering process and make close to real-time iterations possible[^2^]. - -- Scene Intelligence: V-Ray Next for Revit 2015-2020 automatically analyzes your scene to optimize rendering settings and performance. This will help you get the best quality in less time[^2^]. 
- -- Settings Management: V-Ray Next for Revit 2015-2020 allows you to set up individual lighting, mood, and composition per view in the same Revit model. You can also easily share render settings across teams and projects[^2^]. - - - -These are just some of the features that V-Ray Next for Revit 2015-2020 offers. You can learn more about them and other features on the official website[^2^] or by watching some tutorials on YouTube. - - 1b8d091108 - - - - - diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Accelrys Material Studio 6.0 Crack.md b/spaces/usbethFlerru/sovits-modelsV2/example/Accelrys Material Studio 6.0 Crack.md deleted file mode 100644 index b3d1fa74e460aabce119a363c3a34e4b9f398660..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Accelrys Material Studio 6.0 Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

    accelrys material studio 6.0 crack


    DOWNLOAD ❤❤❤ https://urlcod.com/2uyUUN



    -
    -... calculations are performed with the CASTEP (CAmbridge Serial Total Energy Package) code implemented into Materials Studio versions 6.0 (Accelrys, 2011). 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Age Of Empires 3 Gold Edition Free Download Full Version.md b/spaces/usbethFlerru/sovits-modelsV2/example/Age Of Empires 3 Gold Edition Free Download Full Version.md deleted file mode 100644 index 3e1b666ec976ad28f8cea934b4feeb7ffae6c1d5..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Age Of Empires 3 Gold Edition Free Download Full Version.md +++ /dev/null @@ -1,6 +0,0 @@ -

    age of empires 3 gold edition free download full version


    Download File ——— https://urlcod.com/2uyWDD



    -
    -AoE III Gold Edition is one of the best in the AoE series. It includes excellent graphics and amazing gameplay. AoE III has a lot of customization options. AoE III: The Battle of Arsia is a real-time strategy game with tactical elements in which you can command an entire army. The game features excellent graphics, as well as a wide range of tactical options. In addition to these two significant factors, AoE III has a number of other unique features. The game offers a wide range of customization options for the army. Each race has its own set of units, their characteristics and appearance. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Android Application Development For Dummies Epub Free 33 ((EXCLUSIVE)).md b/spaces/usbethFlerru/sovits-modelsV2/example/Android Application Development For Dummies Epub Free 33 ((EXCLUSIVE)).md deleted file mode 100644 index eea95673752f453a95c71dbd66e31d68c1f9ba2b..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Android Application Development For Dummies Epub Free 33 ((EXCLUSIVE)).md +++ /dev/null @@ -1,14 +0,0 @@ -
    -

    This EDRLab application is in constant development and aims to become a reference for accessing EPUB 3 publications in reflowable or fixed-layout format, audiobooks and visual narratives, PDF documents, and DAISY 2.02 and 3 accessible ebooks, whether LCP-protected or not.

    -

    This software is free and open-source; as such, its development is only possible because it is funded by different organizations. Among these are EDRLab memberships, private or public grants, and small recurring donations from happy users (individuals or companies).

    -

    android application development for dummies epub free 33


    Download File https://urlcod.com/2uyVvn



    -

    EDRLab decided to build such an application and release it for free, in order to give users a great way to enjoy, on a large screen, EPUB publications, comics / manga / bandes dessinées, audiobooks, and LCP-protected PDF documents.

    -

    In addition, because of the various content types that can be embedded in OCF ZIP Container files, application/epub+zip may describe content that poses security implications beyond those noted here. However, only in cases where the processor recognizes and processes the additional content, or where further processing of that content is dispatched to other processors, would security issues potentially arise. In such cases, matters of security would fall outside the domain of this registration document.

    -

    The EPUB 3 specification supersedes both RFC 4839 and the Open Container Format 2.0.1 specification, which is located at _library/epub/OCF_2.0.1_draft.doc, and which also uses the application/epub+zip media type.

    -

    Mantle is a desktop application for reading e-books of the EPUB format. It currently has a minimal user interface and is still early in development so is not yet feature complete.

    Mantle supports opening multiple books in tab view, and allows users to reflect the reading direction. Mantle is released under MIT license.

    -

    Most data recovery tools for Windows cost under US$100 for a fully licensed version. Disk Drill enables you to try the software and recover 500 MB of data before making any financial investment in the application. The free download also lets its users benefit from the unlimited free data protection tools built into the program.

    -

    (All made freely available under a Creative Commons BY-NC-SA license on If you'd like to buy my books online, I'd recommend purchasing them from the publisher's website: Amazon may be slightly cheaper, but No Starch Press offers the ebooks for free (in DRM-free PDF, epub, and Kindle formats) when you buy a print copy.)

    -

    Questions regarding the number of hours required for BCPPS recertification should be directed to BPS at (202) 946-5026 or www.bpsweb.org. The ACCP Recertification Dashboard is a free online tool that can track recertification credits as they are earned through ACCP and schedule new opportunities for credits from upcoming ACCP professional development programs.

    -

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Assassins Creed Brotherhood Full Movie HD Download - Dont Miss the Stunning Visuals and Soundtrack.md b/spaces/usbethFlerru/sovits-modelsV2/example/Assassins Creed Brotherhood Full Movie HD Download - Dont Miss the Stunning Visuals and Soundtrack.md deleted file mode 100644 index e30e407edf4145636668a9499d4faee74596cc0b..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Assassins Creed Brotherhood Full Movie HD Download - Dont Miss the Stunning Visuals and Soundtrack.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Assassin's Creed: Brotherhood full movie hd download


    Download »»» https://urlcod.com/2uyVwq



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/user238921933/stable-diffusion-webui/javascript/ui.js b/spaces/user238921933/stable-diffusion-webui/javascript/ui.js deleted file mode 100644 index b7a8268a8fcdf9821cb3af31efea9e0283da1bfe..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/javascript/ui.js +++ /dev/null @@ -1,338 +0,0 @@ -// various functions for interaction with ui.py not large enough to warrant putting them in separate files - -function set_theme(theme){ - gradioURL = window.location.href - if (!gradioURL.includes('?__theme=')) { - window.location.replace(gradioURL + '?__theme=' + theme); - } -} - -function selected_gallery_index(){ - var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item') - var button = gradioApp().querySelector('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item.\\!ring-2') - - var result = -1 - buttons.forEach(function(v, i){ if(v==button) { result = i } }) - - return result -} - -function extract_image_from_gallery(gallery){ - if(gallery.length == 1){ - return [gallery[0]] - } - - index = selected_gallery_index() - - if (index < 0 || index >= gallery.length){ - return [null] - } - - return [gallery[index]]; -} - -function args_to_array(args){ - res = [] - for(var i=0;i label > textarea"); - - if(counter.parentElement == prompt.parentElement){ - return - } - - prompt.parentElement.insertBefore(counter, prompt) - counter.classList.add("token-counter") - prompt.parentElement.style.position = "relative" - - promptTokecountUpdateFuncs[id] = function(){ update_token_counter(id_button); } - textarea.addEventListener("input", promptTokecountUpdateFuncs[id]); - } - - registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button') - registerTextarea('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button') - registerTextarea('img2img_prompt', 'img2img_token_counter', 'img2img_token_button') - registerTextarea('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button') - - show_all_pages = gradioApp().getElementById('settings_show_all_pages') - settings_tabs = gradioApp().querySelector('#settings div') - if(show_all_pages && settings_tabs){ - settings_tabs.appendChild(show_all_pages) - show_all_pages.onclick = function(){ - gradioApp().querySelectorAll('#settings > div').forEach(function(elem){ - elem.style.display = "block"; - }) - } - } -}) - -onOptionsChanged(function(){ - elem = gradioApp().getElementById('sd_checkpoint_hash') - sd_checkpoint_hash = opts.sd_checkpoint_hash || "" - shorthash = sd_checkpoint_hash.substr(0,10) - - if(elem && elem.textContent != shorthash){ - elem.textContent = shorthash - elem.title = sd_checkpoint_hash - elem.href = "https://google.com/search?q=" + sd_checkpoint_hash - } -}) - -let txt2img_textarea, img2img_textarea = undefined; -let wait_time = 800 -let token_timeouts = {}; - -function update_txt2img_tokens(...args) { - update_token_counter("txt2img_token_button") - if (args.length == 2) - return args[0] - return args; -} - -function update_img2img_tokens(...args) { - update_token_counter("img2img_token_button") - if (args.length == 2) - return args[0] - return args; -} - -function update_token_counter(button_id) { - if (token_timeouts[button_id]) - clearTimeout(token_timeouts[button_id]); - token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time); -} - -function restart_reload(){ - document.body.innerHTML='

    Reloading...

    '; - setTimeout(function(){location.reload()},2000) - - return [] -} - -// Simulate an `input` DOM event for Gradio Textbox component. Needed after you edit its contents in javascript, otherwise your edits -// will only visible on web page and not sent to python. -function updateInput(target){ - let e = new Event("input", { bubbles: true }) - Object.defineProperty(e, "target", {value: target}) - target.dispatchEvent(e); -} - - -var desiredCheckpointName = null; -function selectCheckpoint(name){ - desiredCheckpointName = name; - gradioApp().getElementById('change_checkpoint').click() -} diff --git a/spaces/valhalla/XGLM-zero-shot-COPA/app.py b/spaces/valhalla/XGLM-zero-shot-COPA/app.py deleted file mode 100644 index accdbb27b433b169542629b801b89a79df5f2ce3..0000000000000000000000000000000000000000 --- a/spaces/valhalla/XGLM-zero-shot-COPA/app.py +++ /dev/null @@ -1,62 +0,0 @@ -import gradio as gr - -import torch -import torch.nn.functional as F - -from transformers import XGLMTokenizer, XGLMForCausalLM - -tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-2.9B") -model = XGLMForCausalLM.from_pretrained("facebook/xglm-2.9B") - -data_samples = { - 'en': [ - { - "premise": "I wanted to conserve energy.", - "choice1": "I swept the floor in the unoccupied room.", - "choice2": "I shut off the light in the unoccupied room.", - "question": "effect", - "label": "1" - } - ], - 'zh': [ - { - "premise": "我想节约能源。", - "choice1": "我在空着的房间里扫了地板。", - "choice2": "我把空房间里的灯关了。", - "question": "effect", - "label": "1" - } - ] -} - -def get_logprobs(prompt): - inputs = tokenizer(prompt, return_tensors="pt") - input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:] - outputs = model(**inputs, labels=input_ids) - logits = outputs.logits - logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2)) - return logprobs - - -# Zero-shot evaluation for the Choice of Plausible Alternatives (COPA) task. -# A return value of 0 indicates that the first alternative is more plausible, -# while 1 indicates that the second alternative is more plausible. -def COPA_eval(premise, choice1, choice2): - lprob1 = get_logprobs(premise + "\n" + choice1).sum() - lprob2 = get_logprobs(premise + "\n" + choice2).sum() - #return 0 if lprob1 > lprob2 else 1 - return choice1 if lprob1 > lprob2 else choice2 - - - -iface = gr.Interface( - fn=COPA_eval, - inputs=["text", "text", "text"], - outputs=["text"], - theme="huggingface", - title="XGLM-Few-shot Learning with Multilingual Language Models", - description="A simple interface for zero-shot evaluation for the Choice of Plausible Alternatives (COPA) task using XGLM.", - examples=[["I wanted to conserve energy.", "I swept the floor in the unoccupied room.", "I shut off the light in the unoccupied room.",], ["我想节约能源。", "我在空着的房间里扫了地板。", "我把空房间里的灯关了。",]], - article="

    Few-shot Learning with Multilingual Language Models" -) -iface.launch() \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/ldm/modules/image_degradation/bsrgan.py b/spaces/vumichien/canvas_controlnet/ldm/modules/image_degradation/bsrgan.py deleted file mode 100644 index 32ef56169978e550090261cddbcf5eb611a6173b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/ldm/modules/image_degradation/bsrgan.py +++ /dev/null @@ -1,730 +0,0 @@ -# -*- coding: utf-8 -*- -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(30, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - elif i == 1: - image = add_blur(image, sf=sf) - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image":image} - return example - - -# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... -def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): - """ - This is an extended degradation model by combining - the degradation models of BSRGAN and Real-ESRGAN - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - use_shuffle: the degradation shuffle - use_sharp: sharpening the img - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - if use_sharp: - img = add_sharpening(img) - hq = img.copy() - - if random.random() < shuffle_prob: - shuffle_order = random.sample(range(13), 13) - else: - shuffle_order = list(range(13)) - # local shuffle for noise, JPEG is always the last one - shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) - shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) - - poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 - - for i in shuffle_order: - if i == 0: - img = add_blur(img, sf=sf) - elif i == 1: - img = add_resize(img, sf=sf) - elif i == 2: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 3: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 4: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 5: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - elif i == 6: - img = add_JPEG_noise(img) - elif i == 7: - img = add_blur(img, sf=sf) - elif i == 8: - img = add_resize(img, sf=sf) - elif i == 9: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 10: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 11: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 12: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - else: - print('check the shuffle!') - - # resize to desired size - img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), - interpolation=random.choice([1, 2, 3])) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf, lq_patchsize) - 
- return img, hq - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - print(img) - img = util.uint2single(img) - print(img) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_lq = deg_fn(img) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') - - diff --git a/spaces/wdnmd12/Real-CUGAN/app.py b/spaces/wdnmd12/Real-CUGAN/app.py deleted file mode 100644 index 2439c5cec6b61e8a517f957daf710cbb6b5c3cf6..0000000000000000000000000000000000000000 --- a/spaces/wdnmd12/Real-CUGAN/app.py +++ /dev/null @@ -1,62 +0,0 @@ -from upcunet_v3 import RealWaifuUpScaler -import gradio as gr -import time -import logging -import os -from PIL import ImageOps -import numpy as np -import math - - -def greet(input_img, input_model_name, input_tile_mode): - # if input_img.size[0] * input_img.size[1] > 256 * 256: - # y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1])) - # x = int(input_img.size[0]/input_img.size[1]*y) - # input_img = ImageOps.fit(input_img, (x, y)) - input_img = np.array(input_img) - if input_model_name not in model_cache: - t1 = time.time() - upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu") - t2 = time.time() - logger.info(f'load model time, {t2 - t1}') - model_cache[input_model_name] = upscaler - else: - upscaler = model_cache[input_model_name] - logger.info(f'load model from cache') - - start = time.time() - result = upscaler(input_img, tile_mode=input_tile_mode) - end = time.time() - logger.info(f'input_model_name, {input_model_name}') - logger.info(f'input_tile_mode, {input_tile_mode}') - logger.info(f'input shape, {input_img.shape}') - logger.info(f'output shape, {result.shape}') - logger.info(f'speed time, {end - start}') - return result - - -if __name__ == '__main__': - logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s") - logger = logging.getLogger() - - ModelPath = "weights_v3/" - model_cache = {} - - input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='选择model') - input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='选择tile_mode') - input_img = gr.inputs.Image(label='image', type='pil') - - inputs = [input_img, input_model_name, input_tile_mode] - outputs = "image" - iface = gr.Interface(fn=greet, - inputs=inputs, - outputs=outputs, - allow_screenshot=False, - allow_flagging='never', - examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]], - article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN)
    ' - '感谢b站开源的项目,图片过大会导致内存不足,所以我将图片裁剪小,想体验大图片的效果请自行前往上面的链接。
    ' - '修改bbb' - 'The large image will lead to memory limit exceeded. So I crop and resize image. ' - 'If you want to experience the large image, please go to the link above.') - iface.launch() diff --git a/spaces/webshop/amazon_shop/templates/item_page.html b/spaces/webshop/amazon_shop/templates/item_page.html deleted file mode 100644 index e4dcf136a20cc4e6957063736ac990c848b8cb6d..0000000000000000000000000000000000000000 --- a/spaces/webshop/amazon_shop/templates/item_page.html +++ /dev/null @@ -1,113 +0,0 @@ - - - - - - - - - - - -

    -
    -
    -
    -

    Instruction:
    {{ instruction_text }}

    -
    -
    -
    -
    -
    - -
    -
    -
    -
    - -
    -
    -
    -
    -
    - -
    - {% for option_name, option_contents in product_info.options.items() %} -
    -

    {{ option_name }}

    -
    - {% for option_content in option_contents %} - {% set current_options = options.copy() %} - {% set _ = current_options.update({option_name: option_content}) %} - {% set url = url_for('item_page', session_id=session_id, asin=asin, keywords=keywords, page=page, options=current_options) %} - - - {% endfor %} -
    -
    - {% endfor %} -
    -
    -

    {{product_info.Title}}

    -

    Price: {{product_info.Price}}

    -

    Rating: {{product_info.Rating}}

    -
    -
    -
    - -
    -
    -
    -
    - -
    -
    -
    -
    - -
    -
    - -
    -
    -
    -
    -
    - -
    -
    -
    -
    -
    - - - \ No newline at end of file diff --git a/spaces/weishao2019/ChuanhuChatGPT/presets.py b/spaces/weishao2019/ChuanhuChatGPT/presets.py deleted file mode 100644 index 935b9b8d9250838ef06af8e3fbe0979162bfa394..0000000000000000000000000000000000000000 --- a/spaces/weishao2019/ChuanhuChatGPT/presets.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding:utf-8 -*- - -# ChatGPT 设置 -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -# 错误信息 -standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀 -error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误 -connection_timeout_prompt = "连接超时,无法获取对话。" # 连接超时 -read_timeout_prompt = "读取超时,无法获取对话。" # 读取超时 -proxy_error_prompt = "代理错误,无法获取对话。" # 代理错误 -ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误 -no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位 - -max_token_streaming = 3500 # 流式对话时的最大 token 数 -timeout_streaming = 30 # 流式对话时的超时时间 -max_token_all = 3500 # 非流式对话时的最大 token 数 -timeout_all = 200 # 非流式对话时的超时时间 -enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -title = """

    川虎ChatGPT 🚀

    """ -description = """\ -
    - -由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发 - -访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本 - -此App使用 `gpt-3.5-turbo` 大语言模型 -
    -""" - -summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", - "gpt-4", - "gpt-4-0314", - "gpt-4-32k", - "gpt-4-32k-0314", -] # 可选的模型 - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in 中文""" - -PROMPT_TEMPLATE = """\ -Context information is below. ---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in 中文 -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Answer in the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch. -If the context isn't useful, return the original answer. -""" diff --git a/spaces/wwwwwwww2/bingo/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/wwwwwwww2/bingo/src/lib/hooks/use-copy-to-clipboard.tsx deleted file mode 100644 index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000 --- a/spaces/wwwwwwww2/bingo/src/lib/hooks/use-copy-to-clipboard.tsx +++ /dev/null @@ -1,33 +0,0 @@ -'use client' - -import * as React from 'react' - -export interface useCopyToClipboardProps { - timeout?: number -} - -export function useCopyToClipboard({ - timeout = 2000 -}: useCopyToClipboardProps) { - const [isCopied, setIsCopied] = React.useState(false) - - const copyToClipboard = (value: string) => { - if (typeof window === 'undefined' || !navigator.clipboard?.writeText) { - return - } - - if (!value) { - return - } - - navigator.clipboard.writeText(value).then(() => { - setIsCopied(true) - - setTimeout(() => { - setIsCopied(false) - }, timeout) - }) - } - - return { isCopied, copyToClipboard } -} diff --git a/spaces/xdecoder/Demo/utils/arguments.py b/spaces/xdecoder/Demo/utils/arguments.py deleted file mode 100644 index c1a3fa8069e15a287aedd7d15828fa6e23c4fda4..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Demo/utils/arguments.py +++ /dev/null @@ -1,98 +0,0 @@ -import yaml -import json -import argparse -import logging - -logger = logging.getLogger(__name__) - - -def load_config_dict_to_opt(opt, config_dict): - """ - Load the key, value pairs from config_dict to opt, overriding existing values in opt - if there is any. 
- """ - if not isinstance(config_dict, dict): - raise TypeError("Config must be a Python dictionary") - for k, v in config_dict.items(): - k_parts = k.split('.') - pointer = opt - for k_part in k_parts[:-1]: - if k_part not in pointer: - pointer[k_part] = {} - pointer = pointer[k_part] - assert isinstance(pointer, dict), "Overriding key needs to be inside a Python dict." - ori_value = pointer.get(k_parts[-1]) - pointer[k_parts[-1]] = v - if ori_value: - logger.warning(f"Overrided {k} from {ori_value} to {pointer[k_parts[-1]]}") - - -def load_opt_from_config_files(conf_file): - """ - Load opt from the config files, settings in later files can override those in previous files. - - Args: - conf_files: config file path - - Returns: - dict: a dictionary of opt settings - """ - opt = {} - with open(conf_file, encoding='utf-8') as f: - config_dict = yaml.safe_load(f) - - load_config_dict_to_opt(opt, config_dict) - - return opt - - -def load_opt_command(args): - parser = argparse.ArgumentParser(description='MainzTrain: Pretrain or fine-tune models for NLP tasks.') - parser.add_argument('command', help='Command: train/evaluate/train-and-evaluate') - parser.add_argument('--conf_files', required=True, help='Path(s) to the MainzTrain config file(s).') - parser.add_argument('--config_overrides', nargs='*', help='Override parameters on config with a json style string, e.g. {"": , "..": }. A key with "." updates the object in the corresponding nested dict. Remember to escape " in command line.') - parser.add_argument('--overrides', help='arguments that used to overide the config file in cmdline', nargs=argparse.REMAINDER) - - cmdline_args = parser.parse_args() if not args else parser.parse_args(args) - - opt = load_opt_from_config_files(cmdline_args.conf_files) - - if cmdline_args.config_overrides: - config_overrides_string = ' '.join(cmdline_args.config_overrides) - logger.warning(f"Command line config overrides: {config_overrides_string}") - config_dict = json.loads(config_overrides_string) - load_config_dict_to_opt(opt, config_dict) - - if cmdline_args.overrides: - assert len(cmdline_args.overrides) % 2 == 0, "overides arguments is not paired, required: key value" - keys = [cmdline_args.overrides[idx*2] for idx in range(len(cmdline_args.overrides)//2)] - vals = [cmdline_args.overrides[idx*2+1] for idx in range(len(cmdline_args.overrides)//2)] - vals = [val.replace('false', '').replace('False','') if len(val.replace(' ', '')) == 5 else val for val in vals] - - types = [] - for key in keys: - key = key.split('.') - ele = opt.copy() - while len(key) > 0: - ele = ele[key.pop(0)] - types.append(type(ele)) - - config_dict = {x:z(y) for x,y,z in zip(keys, vals, types)} - load_config_dict_to_opt(opt, config_dict) - - # combine cmdline_args into opt dictionary - for key, val in cmdline_args.__dict__.items(): - if val is not None: - opt[key] = val - - return opt, cmdline_args - - -def save_opt_to_json(opt, conf_file): - with open(conf_file, 'w', encoding='utf-8') as f: - json.dump(opt, f, indent=4) - - -def save_opt_to_yaml(opt, conf_file): - with open(conf_file, 'w', encoding='utf-8') as f: - yaml.dump(opt, f) diff --git a/spaces/yangogo/bingo/src/components/chat-list.tsx b/spaces/yangogo/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import 
{ ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
    - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
    - ) -} diff --git a/spaces/yanli01/wrwj/assets/custom.css b/spaces/yanli01/wrwj/assets/custom.css deleted file mode 100644 index f98c7df263b11afa4ddfb5d6ed18aef2ef234226..0000000000000000000000000000000000000000 --- a/spaces/yanli01/wrwj/assets/custom.css +++ /dev/null @@ -1,250 +0,0 @@ -:root { - --chatbot-color-light: #F3F3F3; - --chatbot-color-dark: #121111; -} - -/* 覆盖gradio的页脚信息QAQ */ -footer { - display: none !important; -} -#footer{ - text-align: center; -} -#footer div{ - display: inline-block; -} -#footer .versions{ - font-size: 85%; - opacity: 0.85; -} - -/* user_info */ -#user_info { - white-space: nowrap; - margin-top: -1.3em !important; - padding-left: 112px !important; -} -#user_info p { - font-size: .85em; - font-family: monospace; - color: var(--body-text-color-subdued); -} - -/* status_display */ -#status_display { - display: flex; - min-height: 2em; - align-items: flex-end; - justify-content: flex-end; -} -#status_display p { - font-size: .85em; - font-family: monospace; - color: var(--body-text-color-subdued); -} - -#chuanhu_chatbot, #status_display { - transition: all 0.6s; -} - -/* usage_display */ -#usage_display { - position: relative; - margin: 0; - box-shadow: var(--block-shadow); - border-width: var(--block-border-width); - border-color: var(--block-border-color); - border-radius: var(--block-radius); - background: var(--block-background-fill); - width: 100%; - line-height: var(--line-sm); - min-height: 2em; -} -#usage_display p, #usage_display span { - margin: 0; - padding: .5em 1em; - font-size: .85em; - color: var(--body-text-color-subdued); -} -.progress-bar { - background-color: var(--input-background-fill);; - margin: 0 1em; - height: 20px; - border-radius: 10px; - overflow: hidden; -} -.progress { - background-color: var(--block-title-background-fill);; - height: 100%; - border-radius: 10px; - text-align: right; - transition: width 0.5s ease-in-out; -} -.progress-text { - /* color: white; */ - color: var(--color-accent) !important; - font-size: 1em !important; - font-weight: bold; - padding-right: 10px; - line-height: 20px; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色 */ -@media (prefers-color-scheme: light) { - #chuanhu_chatbot { - background-color: var(--chatbot-color-light) !important; - color: #000000 !important; - } - [data-testid = "bot"] { - background-color: #FFFFFF !important; - } - [data-testid = "user"] { - background-color: #95EC69 !important; - } -} -/* 暗色 */ -@media (prefers-color-scheme: dark) { - #chuanhu_chatbot { - background-color: var(--chatbot-color-dark) !important; - color: #FFFFFF !important; - } - [data-testid = "bot"] { - background-color: #2C2C2C !important; - } - [data-testid = "user"] { - background-color: #26B561 !important; - } - body { - background-color: var(--neutral-950) !important; - } -} -/* 对话气泡 */ -[class *= "message"] { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; -} -/* 表格 */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 
1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* 行内代码 */ -code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: hsla(0, 0%, 0%, 80%)!important; - border-radius: 10px; - padding: 1.4em 1.2em 0em 1.4em; - margin: 1.2em 2em 1.2em 0.5em; - color: #FFF; - box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); -} -/* 代码高亮样式 */ -.highlight .hll { background-color: #49483e } -.highlight .c { color: #75715e } /* Comment */ -.highlight .err { color: #960050; background-color: #1e0010 } /* Error */ -.highlight .k { color: #66d9ef } /* Keyword */ -.highlight .l { color: #ae81ff } /* Literal */ -.highlight .n { color: #f8f8f2 } /* Name */ -.highlight .o { color: #f92672 } /* Operator */ -.highlight .p { color: #f8f8f2 } /* Punctuation */ -.highlight .ch { color: #75715e } /* Comment.Hashbang */ -.highlight .cm { color: #75715e } /* Comment.Multiline */ -.highlight .cp { color: #75715e } /* Comment.Preproc */ -.highlight .cpf { color: #75715e } /* Comment.PreprocFile */ -.highlight .c1 { color: #75715e } /* Comment.Single */ -.highlight .cs { color: #75715e } /* Comment.Special */ -.highlight .gd { color: #f92672 } /* Generic.Deleted */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .gi { color: #a6e22e } /* Generic.Inserted */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #75715e } /* Generic.Subheading */ -.highlight .kc { color: #66d9ef } /* Keyword.Constant */ -.highlight .kd { color: #66d9ef } /* Keyword.Declaration */ -.highlight .kn { color: #f92672 } /* Keyword.Namespace */ -.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ -.highlight .kr { color: #66d9ef } /* Keyword.Reserved */ -.highlight .kt { color: #66d9ef } /* Keyword.Type */ -.highlight .ld { color: #e6db74 } /* Literal.Date */ -.highlight .m { color: #ae81ff } /* Literal.Number */ -.highlight .s { color: #e6db74 } /* Literal.String */ -.highlight .na { color: #a6e22e } /* Name.Attribute */ -.highlight .nb { color: #f8f8f2 } /* Name.Builtin */ -.highlight .nc { color: #a6e22e } /* Name.Class */ -.highlight .no { color: #66d9ef } /* Name.Constant */ -.highlight .nd { color: #a6e22e } /* Name.Decorator */ -.highlight .ni { color: #f8f8f2 } /* Name.Entity */ -.highlight .ne { color: #a6e22e } /* Name.Exception */ -.highlight .nf { color: #a6e22e } /* Name.Function */ -.highlight .nl { color: #f8f8f2 } /* Name.Label */ -.highlight .nn { color: #f8f8f2 } /* Name.Namespace */ -.highlight .nx { color: #a6e22e } /* Name.Other */ -.highlight .py { color: #f8f8f2 } /* Name.Property */ -.highlight .nt { color: #f92672 } /* Name.Tag */ -.highlight .nv { color: #f8f8f2 } /* Name.Variable */ -.highlight .ow { color: #f92672 } /* Operator.Word */ -.highlight .w { color: #f8f8f2 } /* Text.Whitespace */ -.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ -.highlight .mf { color: #ae81ff } /* Literal.Number.Float */ -.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ -.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ -.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ -.highlight .sa { color: #e6db74 } /* Literal.String.Affix */ -.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ -.highlight .sc { color: #e6db74 
} /* Literal.String.Char */ -.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ -.highlight .sd { color: #e6db74 } /* Literal.String.Doc */ -.highlight .s2 { color: #e6db74 } /* Literal.String.Double */ -.highlight .se { color: #ae81ff } /* Literal.String.Escape */ -.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ -.highlight .si { color: #e6db74 } /* Literal.String.Interpol */ -.highlight .sx { color: #e6db74 } /* Literal.String.Other */ -.highlight .sr { color: #e6db74 } /* Literal.String.Regex */ -.highlight .s1 { color: #e6db74 } /* Literal.String.Single */ -.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ -.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #a6e22e } /* Name.Function.Magic */ -.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ -.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ -.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ -.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ -.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/actions/track.ts b/spaces/yderre-aubay/midi-player-demo/src/main/actions/track.ts deleted file mode 100644 index 9b54072a19b83c37adf686e963c20ab15838e38c..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/actions/track.ts +++ /dev/null @@ -1,373 +0,0 @@ -import { AnyChannelEvent, AnyEvent, SetTempoEvent } from "midifile-ts" -import { closedRange } from "../../common/helpers/array" -import { - createValueEvent, - isValueEvent, - ValueEventType, -} from "../../common/helpers/valueEvent" -import { - panMidiEvent, - programChangeMidiEvent, - timeSignatureMidiEvent, - volumeMidiEvent, -} from "../../common/midi/MidiEvent" -import Quantizer from "../../common/quantizer" -import { getMeasureStart } from "../../common/song/selector" -import Track, { - isNoteEvent, - NoteEvent, - TrackEvent, - TrackEventOf, -} from "../../common/track" -import RootStore from "../stores/RootStore" -import { pushHistory } from "./history" -import { - resizeNotesInSelectionLeftBy, - resizeNotesInSelectionRightBy, -} from "./selection" - -export const changeTempo = - ({ song, pushHistory }: RootStore) => - (id: number, microsecondsPerBeat: number) => { - const track = song.conductorTrack - if (track === undefined) { - return - } - pushHistory() - track.updateEvent>(id, { - microsecondsPerBeat: microsecondsPerBeat, - }) - } - -/* events */ - -export const changeNotesVelocity = - ({ pianoRollStore: { selectedTrack }, pushHistory }: RootStore) => - (noteIds: number[], velocity: number) => { - if (selectedTrack === undefined) { - return - } - pushHistory() - selectedTrack.updateEvents( - noteIds.map((id) => ({ - id, - velocity: velocity, - })), - ) - } - -export const createEvent = - ({ - player, - pianoRollStore: { quantizer, selectedTrack }, - pushHistory, - }: RootStore) => - (e: AnyChannelEvent, tick?: number) => { - if (selectedTrack === undefined) { - throw new Error("selected track is undefined") - } - pushHistory() - const id = selectedTrack.createOrUpdate({ - ...e, - tick: quantizer.round(tick ?? 
player.position), - }).id - - // 即座に反映する - // Reflect immediately - if (tick !== undefined) { - player.sendEvent(e) - } - - return id - } - -// Update controller events in the range with linear interpolation values -export const updateEventsInRange = - ( - track: Track | undefined, - quantizer: Quantizer, - filterEvent: (e: TrackEvent) => boolean, - createEvent: (value: number) => AnyEvent, - ) => - ( - startValue: number, - endValue: number, - startTick: number, - endTick: number, - ) => { - if (track === undefined) { - throw new Error("track is undefined") - } - - const minTick = Math.min(startTick, endTick) - const maxTick = Math.max(startTick, endTick) - const _startTick = quantizer.floor(Math.max(0, minTick)) - const _endTick = quantizer.floor(Math.max(0, maxTick)) - - const minValue = Math.min(startValue, endValue) - const maxValue = Math.max(startValue, endValue) - - // linear interpolate - const getValue = - endTick === startTick - ? (_tick: number) => endValue - : (tick: number) => - Math.floor( - Math.min( - maxValue, - Math.max( - minValue, - ((tick - startTick) / (endTick - startTick)) * - (endValue - startValue) + - startValue, - ), - ), - ) - - // Delete events in the dragged area - const events = track.events.filter(filterEvent).filter( - (e) => - // to prevent remove the event created previously, do not remove the event placed at startTick - e.tick !== startTick && - e.tick >= Math.min(minTick, _startTick) && - e.tick <= Math.max(maxTick, _endTick), - ) - - track.transaction((it) => { - it.removeEvents(events.map((e) => e.id)) - - const newEvents = closedRange(_startTick, _endTick, quantizer.unit).map( - (tick) => ({ - ...createEvent(getValue(tick)), - tick, - }), - ) - - it.addEvents(newEvents) - }) - } - -export const updateValueEvents = - (type: ValueEventType) => - ({ pianoRollStore }: RootStore) => - updateEventsInRange( - pianoRollStore.selectedTrack, - pianoRollStore.quantizer, - isValueEvent(type), - createValueEvent(type), - ) - -export const removeEvent = - ({ pianoRollStore: { selectedTrack }, pushHistory }: RootStore) => - (eventId: number) => { - if (selectedTrack === undefined) { - return - } - pushHistory() - selectedTrack.removeEvent(eventId) - } - -/* note */ - -export const createNote = - ({ - pianoRollStore, - pianoRollStore: { quantizer, selectedTrack }, - pushHistory, - song, - }: RootStore) => - (tick: number, noteNumber: number) => { - if (selectedTrack === undefined || selectedTrack.channel == undefined) { - return - } - pushHistory() - - tick = selectedTrack.isRhythmTrack - ? quantizer.round(tick) - : quantizer.floor(tick) - - const duration = selectedTrack.isRhythmTrack - ? song.timebase / 8 // 32th note in the rhythm track - : pianoRollStore.lastNoteDuration ?? 
quantizer.unit - - const note: Omit = { - type: "channel", - subtype: "note", - noteNumber: noteNumber, - tick, - velocity: 127, - duration, - } - - return selectedTrack.addEvent(note) - } - -export const muteNote = - ({ player, pianoRollStore: { selectedTrack } }: RootStore) => - (noteNumber: number) => { - if (selectedTrack === undefined || selectedTrack.channel == undefined) { - return - } - player.stopNote({ channel: selectedTrack.channel, noteNumber }) - } - -const MIN_DURATION = 10 - -export const resizeNoteLeft = - (rootStore: RootStore) => (id: number, tick: number, quantize: boolean) => { - const { - pianoRollStore, - pianoRollStore: { quantizer, selectedTrack }, - } = rootStore - if (selectedTrack === undefined) { - return - } - // 右端を固定して長さを変更 - // Fix the right end and change the length - if (quantize) { - tick = quantizer.round(tick) - } - const note = selectedTrack.getEventById(id) - if (note == undefined || !isNoteEvent(note)) { - return null - } - const duration = note.duration + (note.tick - tick) - const minDuration = quantize ? quantizer.unit : MIN_DURATION - if (note.tick !== tick && duration >= minDuration) { - pushHistory(rootStore)() - pianoRollStore.lastNoteDuration = duration - resizeNotesInSelectionLeftBy(rootStore)(tick - note.tick) - } - } - -export const resizeNoteRight = - (rootStore: RootStore) => (id: number, tick: number, quantize: boolean) => { - const { - pianoRollStore, - pianoRollStore: { quantizer, selectedTrack }, - } = rootStore - if (selectedTrack === undefined) { - return - } - const note = selectedTrack.getEventById(id) - if (note == undefined || !isNoteEvent(note)) { - return null - } - const right = quantize ? quantizer.round(tick) : tick - const minDuration = quantize ? quantizer.unit : MIN_DURATION - const duration = Math.max(minDuration, right - note.tick) - if (note.duration !== duration) { - pushHistory(rootStore)() - pianoRollStore.lastNoteDuration = duration - resizeNotesInSelectionRightBy(rootStore)(duration - note.duration) - } - } - -/* track meta */ - -export const setTrackName = - ({ pianoRollStore: { selectedTrack }, pushHistory }: RootStore) => - (name: string) => { - if (selectedTrack === undefined) { - return - } - pushHistory() - selectedTrack.setName(name) - } - -export const setTrackVolume = - ({ song, player, pushHistory }: RootStore) => - (trackId: number, volume: number) => { - pushHistory() - const track = song.tracks[trackId] - track.setVolume(volume, player.position) - - if (track.channel !== undefined) { - player.sendEvent(volumeMidiEvent(0, track.channel, volume)) - } - } - -export const setTrackPan = - ({ song, player, pushHistory }: RootStore) => - (trackId: number, pan: number) => { - pushHistory() - const track = song.tracks[trackId] - track.setPan(pan, player.position) - - if (track.channel !== undefined) { - player.sendEvent(panMidiEvent(0, track.channel, pan)) - } - } - -export const setTrackInstrument = - ({ song, player, pushHistory }: RootStore) => - (trackId: number, programNumber: number) => { - pushHistory() - const track = song.tracks[trackId] - track.setProgramNumber(programNumber) - - // 即座に反映する - // Reflect immediately - if (track.channel !== undefined) { - player.sendEvent(programChangeMidiEvent(0, track.channel, programNumber)) - } - } - -export const toogleGhostTrack = - ({ pianoRollStore, pushHistory }: RootStore) => - (trackId: number) => { - pushHistory() - if (pianoRollStore.notGhostTracks.has(trackId)) { - pianoRollStore.notGhostTracks.delete(trackId) - } else { - 
pianoRollStore.notGhostTracks.add(trackId) - } - } - -export const toogleAllGhostTracks = - ({ song, pianoRollStore, pushHistory }: RootStore) => - () => { - pushHistory() - if ( - pianoRollStore.notGhostTracks.size > Math.floor(song.tracks.length / 2) - ) { - pianoRollStore.notGhostTracks = new Set() - } else { - for (let i = 0; i < song.tracks.length; ++i) { - pianoRollStore.notGhostTracks.add(i) - } - } - } - -export const addTimeSignature = - ({ song, pushHistory }: RootStore) => - (tick: number, numerator: number, denominator: number) => { - const measureStart = getMeasureStart(song, tick) - - const timeSignatureTick = measureStart?.tick ?? 0 - - // prevent duplication - if ( - measureStart !== null && - measureStart.timeSignature.tick === measureStart.tick - ) { - return - } - - pushHistory() - - song.conductorTrack?.addEvent({ - ...timeSignatureMidiEvent(0, numerator, denominator), - tick: timeSignatureTick, - }) - } - -export const updateTimeSignature = - ({ song, pushHistory }: RootStore) => - (id: number, numerator: number, denominator: number) => { - pushHistory() - song.conductorTrack?.updateEvent(id, { - numerator, - denominator, - }) - } diff --git a/spaces/yhshin/kr-article-summarizer/README.md b/spaces/yhshin/kr-article-summarizer/README.md deleted file mode 100644 index 6ade55b6c4e0228e63373a85daea129d9017cdf3..0000000000000000000000000000000000000000 --- a/spaces/yhshin/kr-article-summarizer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Kr Article Summarizer -emoji: 🌍 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/codegen/tokenization_codegen_fast.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/codegen/tokenization_codegen_fast.py deleted file mode 100644 index fad5e24dbcf55c81589cb23d504381c4cab62f66..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/codegen/tokenization_codegen_fast.py +++ /dev/null @@ -1,257 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Tokenization classes for OpenAI GPT.""" - - -import json -import re -from typing import TYPE_CHECKING, List, Optional, Tuple, Union - -import numpy as np - -from ...utils import is_tf_available, is_torch_available, logging - - -if TYPE_CHECKING: - if is_torch_available(): - import torch - if is_tf_available(): - import tensorflow as tf - -from tokenizers import pre_tokenizers - -from ...tokenization_utils_base import BatchEncoding -from ...tokenization_utils_fast import PreTrainedTokenizerFast -from .tokenization_codegen import CodeGenTokenizer - - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", - }, - "merges_file": { - "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", - }, - "tokenizer_file": { - "Salesforce/codegen-350M-mono": ( - "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" - ), - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "Salesforce/codegen-350M-mono": 2048, -} - - -class CodeGenTokenizerFast(PreTrainedTokenizerFast): - """ - Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level - Byte-Pair-Encoding. - - This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will - be encoded differently whether it is at the beginning of the sentence (without space) or not: - - ```python - >>> from transformers import CodeGenTokenizerFast - - >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono") - >>> tokenizer("Hello world")["input_ids"] - [15496, 995] - - >>> tokenizer(" Hello world")["input_ids"] - [18435, 995] - ``` - - You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since - the model was not pretrained this way, it might yield a decrease in performance. - - - - When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. - - - - This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should - refer to this superclass for more information regarding those methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - merges_file (`str`): - Path to the merges file. - errors (`str`, *optional*, defaults to `"replace"`): - Paradigm to follow when decoding bytes to UTF-8. See - [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. - unk_token (`str`, *optional*, defaults to `<|endoftext|>`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - bos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The beginning of sequence token. - eos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The end of sequence token. - add_prefix_space (`bool`, *optional*, defaults to `False`): - Whether or not to add an initial space to the input. This allows to treat the leading word just as any - other word. (CodeGen tokenizer detect beginning of words by the preceding space). 
- trim_offsets (`bool`, *optional*, defaults to `True`): - Whether or not the post-processing step should trim offsets to avoid including whitespaces. - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - slow_tokenizer_class = CodeGenTokenizer - - def __init__( - self, - vocab_file=None, - merges_file=None, - tokenizer_file=None, - unk_token="<|endoftext|>", - bos_token="<|endoftext|>", - eos_token="<|endoftext|>", - add_prefix_space=False, - **kwargs, - ): - super().__init__( - vocab_file, - merges_file, - tokenizer_file=tokenizer_file, - unk_token=unk_token, - bos_token=bos_token, - eos_token=eos_token, - add_prefix_space=add_prefix_space, - **kwargs, - ) - - if kwargs.pop("add_bos_token", False): - model_id = kwargs.pop("name_or_path", "") - raise ValueError( - "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." - "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" - f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" - f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" - "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." - " so that the fast tokenizer works correctly." - ) - - pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) - if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: - pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) - pre_tok_state["add_prefix_space"] = add_prefix_space - self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) - - self.add_prefix_space = add_prefix_space - - def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: - is_split_into_words = kwargs.get("is_split_into_words", False) - assert self.add_prefix_space or not is_split_into_words, ( - f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " - "to use it with pretokenized inputs." - ) - - return super()._batch_encode_plus(*args, **kwargs) - - def _encode_plus(self, *args, **kwargs) -> BatchEncoding: - is_split_into_words = kwargs.get("is_split_into_words", False) - - assert self.add_prefix_space or not is_split_into_words, ( - f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " - "to use it with pretokenized inputs." - ) - - return super()._encode_plus(*args, **kwargs) - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - files = self._tokenizer.model.save(save_directory, name=filename_prefix) - return tuple(files) - - def decode( - self, - token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], - skip_special_tokens: bool = False, - clean_up_tokenization_spaces: bool = None, - truncate_before_pattern: Optional[List[str]] = None, - **kwargs, - ) -> str: - """ - Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special - tokens and clean up tokenization spaces. - - Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. - - Args: - token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): - List of tokenized input ids. Can be obtained using the `__call__` method. - skip_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not to remove special tokens in the decoding. 
- clean_up_tokenization_spaces (`bool`, *optional*): - Whether or not to clean up the tokenization spaces. If `None`, will default to - `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). - truncate_before_pattern (`List[str]`, *optional*, defaults to `None`): - A list of regular expression strings that will be used to truncate the returned string. This can be - used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning - of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`. - kwargs (additional keyword arguments, *optional*): - Will be passed to the underlying model specific decode method. - - Returns: - `str`: The decoded sentence. - """ - - decoded_text = super().decode( - token_ids=token_ids, - skip_special_tokens=skip_special_tokens, - clean_up_tokenization_spaces=clean_up_tokenization_spaces, - **kwargs, - ) - - if truncate_before_pattern is not None and len(truncate_before_pattern) > 0: - decoded_text = self.truncate(decoded_text, truncate_before_pattern) - - return decoded_text - - def truncate(self, completion, truncate_before_pattern): - def find_re(string, pattern, start_pos): - m = pattern.search(string, start_pos) - return m.start() if m else -1 - - terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern] - - prints = list(re.finditer("^print", completion, re.MULTILINE)) - - if len(prints) > 1: - completion = completion[: prints[1].start()] - - defs = list(re.finditer("^def", completion, re.MULTILINE)) - - if len(defs) > 1: - completion = completion[: defs[1].start()] - - start_pos = 0 - - terminals_pos = [ - pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1 - ] - - if len(terminals_pos) > 0: - return completion[: min(terminals_pos)] - else: - return completion diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/F0Predictor/DioF0Predictor.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index 4ab27de23cae4dbc282e30f84501afebd1a37518..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,85 +0,0 @@ -from modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - -class DioF0Predictor(F0Predictor): - def __init__(self,hop_length=512,f0_min=50,f0_max=1100,sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self,f0): - ''' - 对F0进行插值处理 - ''' - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] #这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:,0], vuv_vector[:,0] - - def resize_f0(self,x, target_len): - source = np.array(x) - source[source<0.001] = np.nan - target = 
np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source) - res = np.nan_to_num(target) - return res - - def compute_f0(self,wav,p_len=None): - if p_len is None: - p_len = wav.shape[0]//self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self,wav,p_len=None): - if p_len is None: - p_len = wav.shape[0]//self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/hifigan/utils.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/hifigan/utils.py deleted file mode 100644 index 9c93c996d3cc73c30d71c1fc47056e4230f35c0f..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -# matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/ymcmy/highlighter_demo/README.md b/spaces/ymcmy/highlighter_demo/README.md deleted file mode 100644 index f9e31dfa91ecd7e2c595cb4f7b4b78cb02350180..0000000000000000000000000000000000000000 --- a/spaces/ymcmy/highlighter_demo/README.md +++ 
/dev/null @@ -1,12 +0,0 @@ ---- -title: Highlighter Demo -emoji: 🐢 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ysharma/bokeh_plot_diffusers/README.md b/spaces/ysharma/bokeh_plot_diffusers/README.md deleted file mode 100644 index c672fcde7177ced940827730b33b0db1b3967cf4..0000000000000000000000000000000000000000 --- a/spaces/ysharma/bokeh_plot_diffusers/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Bokeh Plot Diffusers -emoji: 🌖 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yukie/yukie-sovits3/models.py b/spaces/yukie/yukie-sovits3/models.py deleted file mode 100644 index bdbce8445304abda792f235a4761b831fd6f4d12..0000000000000000000000000000000000000000 --- a/spaces/yukie/yukie-sovits3/models.py +++ /dev/null @@ -1,351 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import attentions -import commons -import modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def 
__init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_lengths, f0=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = x + self.f0_emb(f0).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = 
[DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16,0, filter_channels, n_heads, p_dropout) - hps = { - "sampling_rate": 32000, - "inter_channels": 192, - "resblock": "1", - "resblock_kernel_sizes": [3, 7, 11], - "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - "upsample_rates": [10, 8, 2, 2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16, 16, 4, 4], - 
"gin_channels": 256, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - def forward(self, c, f0, spec, g=None, mel=None, c_lengths=None, spec_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if spec_lengths == None: - spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device) - - g = self.emb_g(g).transpose(1,2) - - z_ptemp, m_p, logs_p, _ = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0)) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - - z_p = self.flow(z, spec_mask, g=g) - z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size) - - # o = self.dec(z_slice, g=g) - o = self.dec(z_slice, g=g, f0=pitch_slice) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, c, f0, g=None, mel=None, c_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = self.emb_g(g).transpose(1,2) - - z_p, m_p, logs_p, c_mask = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0)) - z = self.flow(z_p, c_mask, g=g, reverse=True) - - o = self.dec(z * c_mask, g=g, f0=f0) - - return o diff --git a/spaces/zhenwusw/JoJoGAN/e4e/criteria/w_norm.py b/spaces/zhenwusw/JoJoGAN/e4e/criteria/w_norm.py deleted file mode 100644 index a45ab6f67d8a3f7051be4b7236fa2f38446fd2c1..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/criteria/w_norm.py +++ /dev/null @@ -1,14 +0,0 @@ -import torch -from torch import nn - - -class WNormLoss(nn.Module): - - def __init__(self, start_from_latent_avg=True): - super(WNormLoss, self).__init__() - self.start_from_latent_avg = start_from_latent_avg - - def forward(self, latent, latent_avg=None): - if self.start_from_latent_avg: - latent = latent - latent_avg - return torch.sum(latent.norm(2, dim=(1, 2))) / latent.shape[0] diff --git a/spaces/zhoucr/ai-koni/inference.py b/spaces/zhoucr/ai-koni/inference.py deleted file mode 100644 index d748e5347837a86778ed9769ed9297ac61c8ae3d..0000000000000000000000000000000000000000 --- a/spaces/zhoucr/ai-koni/inference.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - - -import json -import math -import torch -from torch import nn -from torch.nn import functional as F -from torch.utils.data import DataLoader - -import commons -import utils -from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate -from models import SynthesizerTrn -from text.symbols import symbols -from text import text_to_sequence, cleaned_text_to_sequence -from text.cleaners import japanese_cleaners -from scipy.io.wavfile import write - - - -def get_text(text, hps): - text_norm = text_to_sequence(text, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - # print(text_norm.shape) - return text_norm - -hps = utils.get_hparams_from_file("/mnt/vits_koni/configs/japanese_base.json") - -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda() -_ = net_g.eval() - - -_ = utils.load_checkpoint("/mnt/vits_koni/MyDrive/japanese_base/G_42000.pth", net_g, None) - - -def tts(text): - if len(text) > 150: - return "Error: Text is too long", None - 
stn_tst = get_text(text, hps) - - with torch.no_grad(): - x_tst = stn_tst.cuda().unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda() - # print(stn_tst.size()) - audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=2)[0][ - 0, 0].data.cpu().float().numpy() - return hps.data.sampling_rate, audio - -sampling_rate, infer_audio = tts("にーまーまーすーろーぁ") -write("/mnt/vits_koni/MyDrive/japanese_base/inferwav/konitest3.wav", sampling_rate, infer_audio) -print("1") - - diff --git a/spaces/zzzzred/extras/server.py b/spaces/zzzzred/extras/server.py deleted file mode 100644 index 2c5301cc39a5a4767014b3873111b2a592855d0d..0000000000000000000000000000000000000000 --- a/spaces/zzzzred/extras/server.py +++ /dev/null @@ -1,964 +0,0 @@ -from functools import wraps -from flask import ( - Flask, - jsonify, - request, - Response, - render_template_string, - abort, - send_from_directory, - send_file, -) -from flask_cors import CORS -from flask_compress import Compress -import markdown -import argparse -from transformers import AutoTokenizer, AutoProcessor, pipeline -from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM -from transformers import BlipForConditionalGeneration -import unicodedata -import torch -import time -import os -import gc -import sys -import secrets -from PIL import Image -import base64 -from io import BytesIO -from random import randint -import webuiapi -import hashlib -from constants import * -from colorama import Fore, Style, init as colorama_init - -colorama_init() - -if sys.hexversion < 0x030b0000: - print(f"{Fore.BLUE}{Style.BRIGHT}Python 3.11 or newer is recommended to run this program.{Style.RESET_ALL}") - time.sleep(2) - -class SplitArgs(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - setattr( - namespace, self.dest, values.replace('"', "").replace("'", "").split(",") - ) - -#Setting Root Folders for Silero Generations so it is compatible with STSL, should not effect regular runs. 
- Rolyat -parent_dir = os.path.dirname(os.path.abspath(__file__)) -SILERO_SAMPLES_PATH = os.path.join(parent_dir, "tts_samples") -SILERO_SAMPLE_TEXT = os.path.join(parent_dir) - -# Create directories if they don't exist -if not os.path.exists(SILERO_SAMPLES_PATH): - os.makedirs(SILERO_SAMPLES_PATH) -if not os.path.exists(SILERO_SAMPLE_TEXT): - os.makedirs(SILERO_SAMPLE_TEXT) - -# Script arguments -parser = argparse.ArgumentParser( - prog="SillyTavern Extras", description="Web API for transformers models" -) -parser.add_argument( - "--port", type=int, help="Specify the port on which the application is hosted" -) -parser.add_argument( - "--listen", action="store_true", help="Host the app on the local network" -) -parser.add_argument( - "--share", action="store_true", help="Share the app on CloudFlare tunnel" -) -parser.add_argument("--cpu", action="store_true", help="Run the models on the CPU") -parser.add_argument("--cuda", action="store_false", dest="cpu", help="Run the models on the GPU") -parser.add_argument("--cuda-device", help="Specify the CUDA device to use") -parser.add_argument("--mps", "--apple", "--m1", "--m2", action="store_false", dest="cpu", help="Run the models on Apple Silicon") -parser.set_defaults(cpu=True) -parser.add_argument("--summarization-model", help="Load a custom summarization model") -parser.add_argument( - "--classification-model", help="Load a custom text classification model" -) -parser.add_argument("--captioning-model", help="Load a custom captioning model") -parser.add_argument("--embedding-model", help="Load a custom text embedding model") -parser.add_argument("--chroma-host", help="Host IP for a remote ChromaDB instance") -parser.add_argument("--chroma-port", help="HTTP port for a remote ChromaDB instance (defaults to 8000)") -parser.add_argument("--chroma-folder", help="Path for chromadb persistence folder", default='.chroma_db') -parser.add_argument('--chroma-persist', help="ChromaDB persistence", default=True, action=argparse.BooleanOptionalAction) -parser.add_argument( - "--secure", action="store_true", help="Enforces the use of an API key" -) -sd_group = parser.add_mutually_exclusive_group() - -local_sd = sd_group.add_argument_group("sd-local") -local_sd.add_argument("--sd-model", help="Load a custom SD image generation model") -local_sd.add_argument("--sd-cpu", help="Force the SD pipeline to run on the CPU", action="store_true") - -remote_sd = sd_group.add_argument_group("sd-remote") -remote_sd.add_argument( - "--sd-remote", action="store_true", help="Use a remote backend for SD" -) -remote_sd.add_argument( - "--sd-remote-host", type=str, help="Specify the host of the remote SD backend" -) -remote_sd.add_argument( - "--sd-remote-port", type=int, help="Specify the port of the remote SD backend" -) -remote_sd.add_argument( - "--sd-remote-ssl", action="store_true", help="Use SSL for the remote SD backend" -) -remote_sd.add_argument( - "--sd-remote-auth", - type=str, - help="Specify the username:password for the remote SD backend (if required)", -) - -parser.add_argument( - "--enable-modules", - action=SplitArgs, - default=[], - help="Override a list of enabled modules", -) - -args = parser.parse_args() -# [HF, Huggingface] Set port to 7860, set host to remote. 
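As a quick illustration of the `SplitArgs` action defined above (a standalone sketch, not part of the original server), `--enable-modules=caption,summarize` is stripped of quotes and split on commas into a plain Python list before the module list is checked further down.

```python
import argparse

# Same idea as the SplitArgs action above: strip quotes, split on commas.
class SplitArgs(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest,
                values.replace('"', "").replace("'", "").split(","))

parser = argparse.ArgumentParser()
parser.add_argument("--enable-modules", action=SplitArgs, default=[])
args = parser.parse_args(["--enable-modules=caption,summarize"])
print(args.enable_modules)  # ['caption', 'summarize']
```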
-port = 7860 -host = "0.0.0.0" -summarization_model = ( - args.summarization_model - if args.summarization_model - else DEFAULT_SUMMARIZATION_MODEL -) -classification_model = ( - args.classification_model - if args.classification_model - else DEFAULT_CLASSIFICATION_MODEL -) -captioning_model = ( - args.captioning_model if args.captioning_model else DEFAULT_CAPTIONING_MODEL -) -embedding_model = ( - args.embedding_model if args.embedding_model else DEFAULT_EMBEDDING_MODEL -) - -sd_use_remote = False if args.sd_model else True -sd_model = args.sd_model if args.sd_model else DEFAULT_SD_MODEL -sd_remote_host = args.sd_remote_host if args.sd_remote_host else DEFAULT_REMOTE_SD_HOST -sd_remote_port = args.sd_remote_port if args.sd_remote_port else DEFAULT_REMOTE_SD_PORT -sd_remote_ssl = args.sd_remote_ssl -sd_remote_auth = args.sd_remote_auth - -modules = ( - args.enable_modules if args.enable_modules and len(args.enable_modules) > 0 else [] -) - -if len(modules) == 0: - print( - f"{Fore.RED}{Style.BRIGHT}You did not select any modules to run! Choose them by adding an --enable-modules option" - ) - print(f"Example: --enable-modules=caption,summarize{Style.RESET_ALL}") - -# Models init -cuda_device = DEFAULT_CUDA_DEVICE if not args.cuda_device else args.cuda_device -device_string = cuda_device if torch.cuda.is_available() and not args.cpu else 'mps' if torch.backends.mps.is_available() and not args.cpu else 'cpu' -device = torch.device(device_string) -torch_dtype = torch.float32 if device_string != cuda_device else torch.float16 - -if not torch.cuda.is_available() and not args.cpu: - print(f"{Fore.YELLOW}{Style.BRIGHT}torch-cuda is not supported on this device.{Style.RESET_ALL}") - if not torch.backends.mps.is_available() and not args.cpu: - print(f"{Fore.YELLOW}{Style.BRIGHT}torch-mps is not supported on this device.{Style.RESET_ALL}") - - -print(f"{Fore.GREEN}{Style.BRIGHT}Using torch device: {device_string}{Style.RESET_ALL}") - -if "caption" in modules: - print("Initializing an image captioning model...") - captioning_processor = AutoProcessor.from_pretrained(captioning_model) - if "blip" in captioning_model: - captioning_transformer = BlipForConditionalGeneration.from_pretrained( - captioning_model, torch_dtype=torch_dtype - ).to(device) - else: - captioning_transformer = AutoModelForCausalLM.from_pretrained( - captioning_model, torch_dtype=torch_dtype - ).to(device) - -if "summarize" in modules: - print("Initializing a text summarization model...") - summarization_tokenizer = AutoTokenizer.from_pretrained(summarization_model) - summarization_transformer = AutoModelForSeq2SeqLM.from_pretrained( - summarization_model, torch_dtype=torch_dtype - ).to(device) - -if "classify" in modules: - print("Initializing a sentiment classification pipeline...") - classification_pipe = pipeline( - "text-classification", - model=classification_model, - top_k=None, - device=device, - torch_dtype=torch_dtype, - ) - -if "sd" in modules and not sd_use_remote: - from diffusers import StableDiffusionPipeline - from diffusers import EulerAncestralDiscreteScheduler - - print("Initializing Stable Diffusion pipeline...") - sd_device_string = cuda_device if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' - sd_device = torch.device(sd_device_string) - sd_torch_dtype = torch.float32 if sd_device_string != cuda_device else torch.float16 - sd_pipe = StableDiffusionPipeline.from_pretrained( - sd_model, custom_pipeline="lpw_stable_diffusion", torch_dtype=sd_torch_dtype - ).to(sd_device) - 
sd_pipe.safety_checker = lambda images, clip_input: (images, False) - sd_pipe.enable_attention_slicing() - # pipe.scheduler = KarrasVeScheduler.from_config(pipe.scheduler.config) - sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config( - sd_pipe.scheduler.config - ) -elif "sd" in modules and sd_use_remote: - print("Initializing Stable Diffusion connection") - try: - sd_remote = webuiapi.WebUIApi( - host=sd_remote_host, port=sd_remote_port, use_https=sd_remote_ssl - ) - if sd_remote_auth: - username, password = sd_remote_auth.split(":") - sd_remote.set_auth(username, password) - sd_remote.util_wait_for_ready() - except Exception as e: - # remote sd from modules - print( - f"{Fore.RED}{Style.BRIGHT}Could not connect to remote SD backend at http{'s' if sd_remote_ssl else ''}://{sd_remote_host}:{sd_remote_port}! Disabling SD module...{Style.RESET_ALL}" - ) - modules.remove("sd") - -if "tts" in modules: - print("tts module is deprecated. Please use silero-tts instead.") - modules.remove("tts") - modules.append("silero-tts") - - -if "silero-tts" in modules: - if not os.path.exists(SILERO_SAMPLES_PATH): - os.makedirs(SILERO_SAMPLES_PATH) - print("Initializing Silero TTS server") - from silero_api_server import tts - - tts_service = tts.SileroTtsService(SILERO_SAMPLES_PATH) - if len(os.listdir(SILERO_SAMPLES_PATH)) == 0: - print("Generating Silero TTS samples...") - tts_service.update_sample_text(SILERO_SAMPLE_TEXT) - tts_service.generate_samples() - - -if "edge-tts" in modules: - print("Initializing Edge TTS client") - import tts_edge as edge - - -if "chromadb" in modules: - print("Initializing ChromaDB") - import chromadb - import posthog - from chromadb.config import Settings - from sentence_transformers import SentenceTransformer - - # Assume that the user wants in-memory unless a host is specified - # Also disable chromadb telemetry - posthog.capture = lambda *args, **kwargs: None - if args.chroma_host is None: - if args.chroma_persist: - chromadb_client = chromadb.PersistentClient(path=args.chroma_folder, settings=Settings(anonymized_telemetry=False)) - print(f"ChromaDB is running in-memory with persistence. Persistence is stored in {args.chroma_folder}. Can be cleared by deleting the folder or purging db.") - else: - chromadb_client = chromadb.EphemeralClient(Settings(anonymized_telemetry=False)) - print(f"ChromaDB is running in-memory without persistence.") - else: - chroma_port=( - args.chroma_port if args.chroma_port else DEFAULT_CHROMA_PORT - ) - chromadb_client = chromadb.HttpClient(host=args.chroma_host, port=chroma_port, settings=Settings(anonymized_telemetry=False)) - print(f"ChromaDB is remotely configured at {args.chroma_host}:{chroma_port}") - - chromadb_embedder = SentenceTransformer(embedding_model, device=device_string) - chromadb_embed_fn = lambda *args, **kwargs: chromadb_embedder.encode(*args, **kwargs).tolist() - - # Check if the db is connected and running, otherwise tell the user - try: - chromadb_client.heartbeat() - print("Successfully pinged ChromaDB! Your client is successfully connected.") - except: - print("Could not ping ChromaDB! 
If you are running remotely, please check your host and port!") - -# Flask init -app = Flask(__name__) -CORS(app) # allow cross-domain requests -Compress(app) # compress responses -app.config["MAX_CONTENT_LENGTH"] = 100 * 1024 * 1024 - - -def require_module(name): - def wrapper(fn): - @wraps(fn) - def decorated_view(*args, **kwargs): - if name not in modules: - abort(403, "Module is disabled by config") - return fn(*args, **kwargs) - - return decorated_view - - return wrapper - - -# AI stuff -def classify_text(text: str) -> list: - output = classification_pipe( - text, - truncation=True, - max_length=classification_pipe.model.config.max_position_embeddings, - )[0] - return sorted(output, key=lambda x: x["score"], reverse=True) - - -def caption_image(raw_image: Image, max_new_tokens: int = 20) -> str: - inputs = captioning_processor(raw_image.convert("RGB"), return_tensors="pt").to( - device, torch_dtype - ) - outputs = captioning_transformer.generate(**inputs, max_new_tokens=max_new_tokens) - caption = captioning_processor.decode(outputs[0], skip_special_tokens=True) - return caption - - -def summarize_chunks(text: str, params: dict) -> str: - try: - return summarize(text, params) - except IndexError: - print( - "Sequence length too large for model, cutting text in half and calling again" - ) - new_params = params.copy() - new_params["max_length"] = new_params["max_length"] // 2 - new_params["min_length"] = new_params["min_length"] // 2 - return summarize_chunks( - text[: (len(text) // 2)], new_params - ) + summarize_chunks(text[(len(text) // 2) :], new_params) - - -def summarize(text: str, params: dict) -> str: - # Tokenize input - inputs = summarization_tokenizer(text, return_tensors="pt").to(device) - token_count = len(inputs[0]) - - bad_words_ids = [ - summarization_tokenizer(bad_word, add_special_tokens=False).input_ids - for bad_word in params["bad_words"] - ] - summary_ids = summarization_transformer.generate( - inputs["input_ids"], - num_beams=2, - max_new_tokens=max(token_count, int(params["max_length"])), - min_new_tokens=min(token_count, int(params["min_length"])), - repetition_penalty=float(params["repetition_penalty"]), - temperature=float(params["temperature"]), - length_penalty=float(params["length_penalty"]), - bad_words_ids=bad_words_ids, - ) - summary = summarization_tokenizer.batch_decode( - summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True - )[0] - summary = normalize_string(summary) - return summary - - -def normalize_string(input: str) -> str: - output = " ".join(unicodedata.normalize("NFKC", input).strip().split()) - return output - - -def generate_image(data: dict) -> Image: - prompt = normalize_string(f'{data["prompt_prefix"]} {data["prompt"]}') - - if sd_use_remote: - image = sd_remote.txt2img( - prompt=prompt, - negative_prompt=data["negative_prompt"], - sampler_name=data["sampler"], - steps=data["steps"], - cfg_scale=data["scale"], - width=data["width"], - height=data["height"], - restore_faces=data["restore_faces"], - enable_hr=data["enable_hr"], - save_images=True, - send_images=True, - do_not_save_grid=False, - do_not_save_samples=False, - ).image - else: - image = sd_pipe( - prompt=prompt, - negative_prompt=data["negative_prompt"], - num_inference_steps=data["steps"], - guidance_scale=data["scale"], - width=data["width"], - height=data["height"], - ).images[0] - - image.save("./debug.png") - return image - - -def image_to_base64(image: Image, quality: int = 75) -> str: - buffer = BytesIO() - image.convert("RGB") - image.save(buffer, 
format="JPEG", quality=quality) - img_str = base64.b64encode(buffer.getvalue()).decode("utf-8") - return img_str - - -ignore_auth = [] -# [HF, Huggingface] Get password instead of text file. -api_key = os.environ.get("password") - -def is_authorize_ignored(request): - view_func = app.view_functions.get(request.endpoint) - - if view_func is not None: - if view_func in ignore_auth: - return True - return False - -@app.before_request -def before_request(): - # Request time measuring - request.start_time = time.time() - - # Checks if an API key is present and valid, otherwise return unauthorized - # The options check is required so CORS doesn't get angry - try: - if request.method != 'OPTIONS' and is_authorize_ignored(request) == False and getattr(request.authorization, 'token', '') != api_key: - print(f"WARNING: Unauthorized API key access from {request.remote_addr}") - if request.method == 'POST': - print(f"Incoming POST request with {request.headers.get('Authorization')}") - response = jsonify({ 'error': '401: Invalid API key' }) - response.status_code = 401 - return "https://(hf_name)-(space_name).hf.space/" - except Exception as e: - print(f"API key check error: {e}") - return "https://(hf_name)-(space_name).hf.space/" - - -@app.after_request -def after_request(response): - duration = time.time() - request.start_time - response.headers["X-Request-Duration"] = str(duration) - return response - - -@app.route("/", methods=["GET"]) -def index(): - with open("./README.md", "r", encoding="utf8") as f: - content = f.read() - return render_template_string(markdown.markdown(content, extensions=["tables"])) - - -@app.route("/api/extensions", methods=["GET"]) -def get_extensions(): - extensions = dict( - { - "extensions": [ - { - "name": "not-supported", - "metadata": { - "display_name": """Extensions serving using Extensions API is no longer supported. 
Please update the mod from: https://github.com/Cohee1207/SillyTavern""", - "requires": [], - "assets": [], - }, - } - ] - } - ) - return jsonify(extensions) - - -@app.route("/api/caption", methods=["POST"]) -@require_module("caption") -def api_caption(): - data = request.get_json() - - if "image" not in data or not isinstance(data["image"], str): - abort(400, '"image" is required') - - image = Image.open(BytesIO(base64.b64decode(data["image"]))) - image = image.convert("RGB") - image.thumbnail((512, 512)) - caption = caption_image(image) - thumbnail = image_to_base64(image) - print("Caption:", caption, sep="\n") - gc.collect() - return jsonify({"caption": caption, "thumbnail": thumbnail}) - - -@app.route("/api/summarize", methods=["POST"]) -@require_module("summarize") -def api_summarize(): - data = request.get_json() - - if "text" not in data or not isinstance(data["text"], str): - abort(400, '"text" is required') - - params = DEFAULT_SUMMARIZE_PARAMS.copy() - - if "params" in data and isinstance(data["params"], dict): - params.update(data["params"]) - - print("Summary input:", data["text"], sep="\n") - summary = summarize_chunks(data["text"], params) - print("Summary output:", summary, sep="\n") - gc.collect() - return jsonify({"summary": summary}) - - -@app.route("/api/classify", methods=["POST"]) -@require_module("classify") -def api_classify(): - data = request.get_json() - - if "text" not in data or not isinstance(data["text"], str): - abort(400, '"text" is required') - - print("Classification input:", data["text"], sep="\n") - classification = classify_text(data["text"]) - print("Classification output:", classification, sep="\n") - gc.collect() - return jsonify({"classification": classification}) - - -@app.route("/api/classify/labels", methods=["GET"]) -@require_module("classify") -def api_classify_labels(): - classification = classify_text("") - labels = [x["label"] for x in classification] - return jsonify({"labels": labels}) - - -@app.route("/api/image", methods=["POST"]) -@require_module("sd") -def api_image(): - required_fields = { - "prompt": str, - } - - optional_fields = { - "steps": 30, - "scale": 6, - "sampler": "DDIM", - "width": 512, - "height": 512, - "restore_faces": False, - "enable_hr": False, - "prompt_prefix": PROMPT_PREFIX, - "negative_prompt": NEGATIVE_PROMPT, - } - - data = request.get_json() - - # Check required fields - for field, field_type in required_fields.items(): - if field not in data or not isinstance(data[field], field_type): - abort(400, f'"{field}" is required') - - # Set optional fields to default values if not provided - for field, default_value in optional_fields.items(): - type_match = ( - (int, float) - if isinstance(default_value, (int, float)) - else type(default_value) - ) - if field not in data or not isinstance(data[field], type_match): - data[field] = default_value - - try: - print("SD inputs:", data, sep="\n") - image = generate_image(data) - base64image = image_to_base64(image, quality=90) - return jsonify({"image": base64image}) - except RuntimeError as e: - abort(400, str(e)) - - -@app.route("/api/image/model", methods=["POST"]) -@require_module("sd") -def api_image_model_set(): - data = request.get_json() - - if not sd_use_remote: - abort(400, "Changing model for local sd is not supported.") - if "model" not in data or not isinstance(data["model"], str): - abort(400, '"model" is required') - - old_model = sd_remote.util_get_current_model() - sd_remote.util_set_model(data["model"], find_closest=False) - # 
sd_remote.util_set_model(data['model']) - sd_remote.util_wait_for_ready() - new_model = sd_remote.util_get_current_model() - - return jsonify({"previous_model": old_model, "current_model": new_model}) - - -@app.route("/api/image/model", methods=["GET"]) -@require_module("sd") -def api_image_model_get(): - model = sd_model - - if sd_use_remote: - model = sd_remote.util_get_current_model() - - return jsonify({"model": model}) - - -@app.route("/api/image/models", methods=["GET"]) -@require_module("sd") -def api_image_models(): - models = [sd_model] - - if sd_use_remote: - models = sd_remote.util_get_model_names() - - return jsonify({"models": models}) - - -@app.route("/api/image/samplers", methods=["GET"]) -@require_module("sd") -def api_image_samplers(): - samplers = ["Euler a"] - - if sd_use_remote: - samplers = [sampler["name"] for sampler in sd_remote.get_samplers()] - - return jsonify({"samplers": samplers}) - - -@app.route("/api/modules", methods=["GET"]) -def get_modules(): - return jsonify({"modules": modules}) - - -@app.route("/api/tts/speakers", methods=["GET"]) -@require_module("silero-tts") -def tts_speakers(): - voices = [ - { - "name": speaker, - "voice_id": speaker, - "preview_url": f"{str(request.url_root)}api/tts/sample/{speaker}", - } - for speaker in tts_service.get_speakers() - ] - return jsonify(voices) - -# Added fix for Silero not working as new files were unable to be created if one already existed. - Rolyat 7/7/23 -@app.route("/api/tts/generate", methods=["POST"]) -@require_module("silero-tts") -def tts_generate(): - voice = request.get_json() - if "text" not in voice or not isinstance(voice["text"], str): - abort(400, '"text" is required') - if "speaker" not in voice or not isinstance(voice["speaker"], str): - abort(400, '"speaker" is required') - # Remove asterisks - voice["text"] = voice["text"].replace("*", "") - try: - # Remove the destination file if it already exists - if os.path.exists('test.wav'): - os.remove('test.wav') - - audio = tts_service.generate(voice["speaker"], voice["text"]) - audio_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.basename(audio)) - - os.rename(audio, audio_file_path) - return send_file(audio_file_path, mimetype="audio/x-wav") - except Exception as e: - print(e) - abort(500, voice["speaker"]) - - -@app.route("/api/tts/sample/", methods=["GET"]) -@require_module("silero-tts") -def tts_play_sample(speaker: str): - return send_from_directory(SILERO_SAMPLES_PATH, f"{speaker}.wav") - - -@app.route("/api/edge-tts/list", methods=["GET"]) -@require_module("edge-tts") -def edge_tts_list(): - voices = edge.get_voices() - return jsonify(voices) - - -@app.route("/api/edge-tts/generate", methods=["POST"]) -@require_module("edge-tts") -def edge_tts_generate(): - data = request.get_json() - if "text" not in data or not isinstance(data["text"], str): - abort(400, '"text" is required') - if "voice" not in data or not isinstance(data["voice"], str): - abort(400, '"voice" is required') - if "rate" in data and isinstance(data['rate'], int): - rate = data['rate'] - else: - rate = 0 - # Remove asterisks - data["text"] = data["text"].replace("*", "") - try: - audio = edge.generate_audio(text=data["text"], voice=data["voice"], rate=rate) - return Response(audio, mimetype="audio/mpeg") - except Exception as e: - print(e) - abort(500, data["voice"]) - - -@app.route("/api/chromadb", methods=["POST"]) -@require_module("chromadb") -def chromadb_add_messages(): - data = request.get_json() - if "chat_id" not in data or not 
isinstance(data["chat_id"], str): - abort(400, '"chat_id" is required') - if "messages" not in data or not isinstance(data["messages"], list): - abort(400, '"messages" is required') - - chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest() - collection = chromadb_client.get_or_create_collection( - name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn - ) - - documents = [m["content"] for m in data["messages"]] - ids = [m["id"] for m in data["messages"]] - metadatas = [ - {"role": m["role"], "date": m["date"], "meta": m.get("meta", "")} - for m in data["messages"] - ] - - collection.upsert( - ids=ids, - documents=documents, - metadatas=metadatas, - ) - - return jsonify({"count": len(ids)}) - - -@app.route("/api/chromadb/purge", methods=["POST"]) -@require_module("chromadb") -def chromadb_purge(): - data = request.get_json() - if "chat_id" not in data or not isinstance(data["chat_id"], str): - abort(400, '"chat_id" is required') - - chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest() - collection = chromadb_client.get_or_create_collection( - name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn - ) - - count = collection.count() - collection.delete() - print("ChromaDB embeddings deleted", count) - return 'Ok', 200 - - -@app.route("/api/chromadb/query", methods=["POST"]) -@require_module("chromadb") -def chromadb_query(): - data = request.get_json() - if "chat_id" not in data or not isinstance(data["chat_id"], str): - abort(400, '"chat_id" is required') - if "query" not in data or not isinstance(data["query"], str): - abort(400, '"query" is required') - - if "n_results" not in data or not isinstance(data["n_results"], int): - n_results = 1 - else: - n_results = data["n_results"] - - chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest() - collection = chromadb_client.get_or_create_collection( - name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn - ) - - if collection.count() == 0: - print(f"Queried empty/missing collection for {repr(data['chat_id'])}.") - return jsonify([]) - - - n_results = min(collection.count(), n_results) - query_result = collection.query( - query_texts=[data["query"]], - n_results=n_results, - ) - - documents = query_result["documents"][0] - ids = query_result["ids"][0] - metadatas = query_result["metadatas"][0] - distances = query_result["distances"][0] - - messages = [ - { - "id": ids[i], - "date": metadatas[i]["date"], - "role": metadatas[i]["role"], - "meta": metadatas[i]["meta"], - "content": documents[i], - "distance": distances[i], - } - for i in range(len(ids)) - ] - - return jsonify(messages) - -@app.route("/api/chromadb/multiquery", methods=["POST"]) -@require_module("chromadb") -def chromadb_multiquery(): - data = request.get_json() - if "chat_list" not in data or not isinstance(data["chat_list"], list): - abort(400, '"chat_list" is required and should be a list') - if "query" not in data or not isinstance(data["query"], str): - abort(400, '"query" is required') - - if "n_results" not in data or not isinstance(data["n_results"], int): - n_results = 1 - else: - n_results = data["n_results"] - - messages = [] - - for chat_id in data["chat_list"]: - if not isinstance(chat_id, str): - continue - - try: - chat_id_md5 = hashlib.md5(chat_id.encode()).hexdigest() - collection = chromadb_client.get_collection( - name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn - ) - - # Skip this chat if the collection is empty - if collection.count() == 0: - continue - - n_results_per_chat = 
min(collection.count(), n_results) - query_result = collection.query( - query_texts=[data["query"]], - n_results=n_results_per_chat, - ) - documents = query_result["documents"][0] - ids = query_result["ids"][0] - metadatas = query_result["metadatas"][0] - distances = query_result["distances"][0] - - chat_messages = [ - { - "id": ids[i], - "date": metadatas[i]["date"], - "role": metadatas[i]["role"], - "meta": metadatas[i]["meta"], - "content": documents[i], - "distance": distances[i], - } - for i in range(len(ids)) - ] - - messages.extend(chat_messages) - except Exception as e: - print(e) - - #remove duplicate msgs, filter down to the right number - seen = set() - messages = [d for d in messages if not (d['content'] in seen or seen.add(d['content']))] - messages = sorted(messages, key=lambda x: x['distance'])[0:n_results] - - return jsonify(messages) - - -@app.route("/api/chromadb/export", methods=["POST"]) -@require_module("chromadb") -def chromadb_export(): - data = request.get_json() - if "chat_id" not in data or not isinstance(data["chat_id"], str): - abort(400, '"chat_id" is required') - - chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest() - try: - collection = chromadb_client.get_collection( - name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn - ) - except Exception as e: - print(e) - abort(400, "Chat collection not found in chromadb") - - collection_content = collection.get() - documents = collection_content.get('documents', []) - ids = collection_content.get('ids', []) - metadatas = collection_content.get('metadatas', []) - - unsorted_content = [ - { - "id": ids[i], - "metadata": metadatas[i], - "document": documents[i], - } - for i in range(len(ids)) - ] - - sorted_content = sorted(unsorted_content, key=lambda x: x['metadata']['date']) - - export = { - "chat_id": data["chat_id"], - "content": sorted_content - } - - return jsonify(export) - -@app.route("/api/chromadb/import", methods=["POST"]) -@require_module("chromadb") -def chromadb_import(): - data = request.get_json() - content = data['content'] - if "chat_id" not in data or not isinstance(data["chat_id"], str): - abort(400, '"chat_id" is required') - - chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest() - collection = chromadb_client.get_or_create_collection( - name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn - ) - - documents = [item['document'] for item in content] - metadatas = [item['metadata'] for item in content] - ids = [item['id'] for item in content] - - - collection.upsert(documents=documents, metadatas=metadatas, ids=ids) - print(f"Imported {len(ids)} (total {collection.count()}) content entries into {repr(data['chat_id'])}") - - return jsonify({"count": len(ids)}) - - -if args.share: - from flask_cloudflared import _run_cloudflared - import inspect - - sig = inspect.signature(_run_cloudflared) - sum = sum( - 1 - for param in sig.parameters.values() - if param.kind == param.POSITIONAL_OR_KEYWORD - ) - if sum > 1: - metrics_port = randint(8100, 9000) - cloudflare = _run_cloudflared(port, metrics_port) - else: - cloudflare = _run_cloudflared(port) - print("Running on", cloudflare) - -ignore_auth.append(tts_play_sample) -app.run(host=host, port=port)
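# --- Usage sketch (editor's addition, not part of the deleted app.py above) ---
# Minimal client example for the Flask API defined above. The base URL
# "http://localhost:5100" is an assumption; the real host/port are whatever
# `app.run(host=host, port=port)` was started with. The Bearer token must match
# the "password" environment variable checked in before_request.
import os
import requests

BASE_URL = "http://localhost:5100"  # hypothetical host/port
headers = {"Authorization": f"Bearer {os.environ.get('password', '')}"}

# Summarize a block of text via /api/summarize (requires the "summarize" module).
payload = {
    "text": "Long text to be summarized goes here...",
    "params": {"max_length": 100, "min_length": 20},  # optional overrides of DEFAULT_SUMMARIZE_PARAMS
}
resp = requests.post(f"{BASE_URL}/api/summarize", json=payload, headers=headers)
resp.raise_for_status()
print(resp.json()["summary"])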